/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int dtrace_destructive_disallow = 0;
dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
size_t dtrace_global_maxsize = (16 * 1024);
size_t dtrace_actions_max = (16 * 1024);
size_t dtrace_retain_max = 1024;
dtrace_optval_t dtrace_helper_actions_max = 32;
dtrace_optval_t dtrace_helper_providers_max = 32;
dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t dtrace_strsize_default = 256;
dtrace_optval_t dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t dtrace_nspec_default = 1;
dtrace_optval_t dtrace_specsize_default = 32 * 1024;
dtrace_optval_t dtrace_stackframes_default = 20;
dtrace_optval_t dtrace_ustackframes_default = 20;
dtrace_optval_t dtrace_jstackframes_default = 50;
dtrace_optval_t dtrace_jstackstrsize_default = 512;
int dtrace_msgdsize_max = 128;
hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int dtrace_devdepth_max = 32;
int dtrace_err_verbose;
hrtime_t dtrace_deadman_interval = NANOSEC;
hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char dtrace_zero[256] = { 0 };	/* zero-filled memory */
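/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): because these variables live in the kernel's dtrace module, a
 * sufficiently privileged consumer can read them from D with the backtick
 * syntax noted above -- e.g., something like the following one-liner,
 * which traces the first byte of the zero-filled region:
 *
 *	dtrace -qn 'BEGIN { trace(`dtrace_zero[0]); exit(0); }'
 */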

/*
 * DTrace Internal Variables
 */
static dev_info_t *dtrace_devi;			/* device info */
static vmem_t *dtrace_arena;			/* probe ID arena */
static vmem_t *dtrace_minor;			/* minor number arena */
static taskq_t *dtrace_taskq;			/* task queue */
static dtrace_probe_t **dtrace_probes;		/* array of all probes */
static int dtrace_nprobes;			/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t *dtrace_meta_pid;		/* user-land meta provider */
static int dtrace_opens;			/* number of opens */
static int dtrace_helpers;			/* number of helpers */
static void *dtrace_softstate;			/* softstate pointer */
static dtrace_hash_t *dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t *dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t *dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int dtrace_toxranges;			/* number of toxic ranges */
static int dtrace_toxranges_max;		/* size of toxic range array */
static dtrace_anon_t dtrace_anon;		/* anonymous enabling */
static kmem_cache_t *dtrace_state_cache;	/* cache for dynamic state */
static uint64_t dtrace_vtime_references;	/* number of vtimestamp refs */
static kthread_t *dtrace_panicked;		/* panicking thread */
static dtrace_ecb_t *dtrace_ecb_create_cache;	/* cached created ECB */
static dtrace_genid_t dtrace_probegen;		/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t dtrace_dynhash_sink;	/* end of dynamic hash chains */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t dtrace_lock;		/* probe state lock */
static kmutex_t dtrace_provider_lock;	/* provider state lock */
static kmutex_t dtrace_meta_lock;	/* meta-provider state lock */
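
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): the ordering rules above, expressed as code.  A hypothetical
 * framework path that needed all three locks would acquire them
 * outermost-first and release them innermost-first.  (cpu_lock, when
 * needed, would be taken between dtrace_meta_lock and the other locks.)
 */
static void
dtrace_lock_order_sketch(void)	/* hypothetical; illustration only */
{
    mutex_enter(&dtrace_meta_lock);	/* outermost */
    mutex_enter(&dtrace_provider_lock);
    mutex_enter(&dtrace_lock);		/* innermost */

    /* ... manipulate meta provider, provider and core DTrace state ... */

    mutex_exit(&dtrace_lock);
    mutex_exit(&dtrace_provider_lock);
    mutex_exit(&dtrace_meta_lock);
}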

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t dtrace_provider_ops = {
    (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
    (void (*)(void *, struct modctl *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
    NULL,
    NULL,
    NULL,
    (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t dtrace_probeid_end;		/* special END probe */
dtrace_id_t dtrace_probeid_error;		/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char *dtrace_helptrace_buffer;
int dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int dtrace_helptrace_enabled = 1;
#else
int dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define DTRACE_HASHSTR(hash, probe) \
    dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define DTRACE_HASHNEXT(hash, probe) \
    (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define DTRACE_HASHPREV(hash, probe) \
    (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define DTRACE_HASHEQ(hash, lhs, rhs) \
    (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define DTRACE_AGGHASHSIZE_SLEW	17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define DTRACE_TLS_THRKEY(where) { \
    uint_t intr = 0; \
    uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
    for (; actv; actv >>= 1) \
        intr++; \
    ASSERT(intr < (1 << 3)); \
    (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
        (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}

#define DT_BSWAP_8(x)	((x) & 0xff)
#define DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define DTRACE_STORE(type, tomax, offset, what) \
    *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define DTRACE_ALIGNCHECK(addr, size, flags) \
    if (addr & (size - 1)) { \
        *flags |= CPU_DTRACE_BADALIGN; \
        cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
        return (0); \
    }
#else
#define DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by baseaddr, basesz, taking care to
 * avoid problems with overflow and underflow of the unsigned quantities.
 */
#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
    ((testaddr) - (baseaddr) < (basesz) && \
    (testaddr) + (testsz) - (baseaddr) <= (basesz))

#define DTRACE_LOADFUNC(bits) \
/*CSTYLED*/ \
uint##bits##_t \
dtrace_load##bits(uintptr_t addr) \
{ \
    size_t size = bits / NBBY; \
    /*CSTYLED*/ \
    uint##bits##_t rval; \
    int i; \
    volatile uint16_t *flags = (volatile uint16_t *) \
        &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
\
    DTRACE_ALIGNCHECK(addr, size, flags); \
\
    for (i = 0; i < dtrace_toxranges; i++) { \
        if (addr >= dtrace_toxrange[i].dtt_limit) \
            continue; \
\
        if (addr + size <= dtrace_toxrange[i].dtt_base) \
            continue; \
\
        /* \
         * This address falls within a toxic region; return 0. \
         */ \
        *flags |= CPU_DTRACE_BADADDR; \
        cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
        return (0); \
    } \
\
    *flags |= CPU_DTRACE_NOFAULT; \
    /*CSTYLED*/ \
    rval = *((volatile uint##bits##_t *)addr); \
    *flags &= ~CPU_DTRACE_NOFAULT; \
\
    return (rval); \
}
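
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): why DTRACE_INRANGE above is safe against overflow.  Both
 * comparisons are on unsigned quantities, so a testaddr below baseaddr
 * underflows to a huge value and fails the first check, and a testaddr +
 * testsz that runs past (or wraps around) the base region fails the second.
 * With an 8-byte base region at 0x1000:
 */
static void
dtrace_inrange_sketch(void)	/* hypothetical; illustration only */
{
    uintptr_t base = 0x1000, size = 8;

    ASSERT(DTRACE_INRANGE(0x1000, 8, base, size));	/* exact fit */
    ASSERT(!DTRACE_INRANGE(0x0fff, 1, base, size));	/* wraps below */
    ASSERT(!DTRACE_INRANGE(0x1004, 8, base, size));	/* runs past end */
    ASSERT(!DTRACE_INRANGE((uintptr_t)-4, 8, base, size)); /* overflows */
}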

#ifdef _LP64
#define dtrace_loadptr	dtrace_load64
#else
#define dtrace_loadptr	dtrace_load32
#endif

#define DTRACE_DYNHASH_FREE	0
#define DTRACE_DYNHASH_SINK	1
#define DTRACE_DYNHASH_VALID	2

#define DTRACE_MATCH_NEXT	0
#define DTRACE_MATCH_DONE	1
#define DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define DTRACE_STATE_ALIGN	64

#define DTRACE_FLAGS2FLT(flags) \
    (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
    ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
    ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
    ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
    ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
    ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
    ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
    ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
    DTRACEFLT_UNKNOWN)

#define DTRACEACT_ISSTRING(act) \
    ((act)->dta_kind == DTRACEACT_DIFEXPR && \
    (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
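/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): reading arbitrary kernel memory from probe context must go
 * through the dtrace_load*() variants rather than a direct dereference,
 * which could fault or touch a toxic range.  A hypothetical helper that
 * walks one link of a linked list would look like:
 */
static uintptr_t
dtrace_next_sketch(uintptr_t elem, size_t next_offset)	/* hypothetical */
{
    /* equivalent to elem->next, but fault- and toxic-range-safe */
    return (dtrace_loadptr(elem + next_offset));
}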
void
dtrace_panic(const char *format, ...)
{
    va_list alist;

    va_start(alist, format);
    dtrace_vpanic(format, alist);
    va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
    dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

    /*
     * We just need something here that even the most clever compiler
     * cannot optimize away.
     */
    return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
    /*
     * Most counters stored to in probe context are per-CPU counters.
     * However, there are some error conditions that are sufficiently
     * arcane that they don't merit per-CPU storage.  If these counters
     * are incremented concurrently on different CPUs, scalability will be
     * adversely affected -- but we don't expect them to be white-hot in a
     * correctly constructed enabling...
     */
    uint32_t oval, nval;

    do {
        oval = *counter;

        if ((nval = oval + 1) == 0) {
            /*
             * If the counter would wrap, set it to 1 -- assuring
             * that the counter is never zero when we have seen
             * errors.  (The counter must be 32-bits because we
             * aren't guaranteed a 64-bit compare&swap operation.)
             * To save this code both the infamy of being fingered
             * by a priggish news story and the indignity of being
             * the target of a neo-puritan witch trial, we're
             * carefully avoiding any colorful description of the
             * likelihood of this condition -- but suffice it to
             * say that it is only slightly more likely than the
             * overflow of predicate cache IDs, as discussed in
             * dtrace_predicate_create().
             */
            nval = 1;
        }
    } while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
    if (dest < mstate->dtms_scratch_base)
        return (0);

    if (dest + size < dest)
        return (0);

    if (dest + size > mstate->dtms_scratch_ptr)
        return (0);

    return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
    int i;

    for (i = 0; i < nsvars; i++) {
        dtrace_statvar_t *svar = svars[i];

        if (svar == NULL || svar->dtsv_size == 0)
            continue;

        if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
            return (1);
    }

    return (0);
}
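
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): the lock-free compare-and-swap loop in dtrace_error() above is
 * the general idiom for updating shared counters from probe context.
 * Stripped of the wrap-avoidance logic, it reduces to:
 */
static void
dtrace_cas_increment_sketch(uint32_t *counter)	/* hypothetical */
{
    uint32_t oval, nval;

    do {
        oval = *counter;	/* snapshot the current value */
        nval = oval + 1;	/* compute the desired value */
    } while (dtrace_cas32(counter, oval, nval) != oval);
}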
This check will pick 588 * up both thread-local variables and any global dynamically-allocated 589 * variables. 590 */ 591 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 592 vstate->dtvs_dynvars.dtds_size)) 593 return (1); 594 595 /* 596 * Finally, check the static local and global variables. These checks 597 * take the longest, so we perform them last. 598 */ 599 if (dtrace_canstore_statvar(addr, sz, 600 vstate->dtvs_locals, vstate->dtvs_nlocals)) 601 return (1); 602 603 if (dtrace_canstore_statvar(addr, sz, 604 vstate->dtvs_globals, vstate->dtvs_nglobals)) 605 return (1); 606 607 return (0); 608 } 609 610 611 /* 612 * Convenience routine to check to see if the address is within a memory 613 * region in which a load may be issued given the user's privilege level; 614 * if not, it sets the appropriate error flags and loads 'addr' into the 615 * illegal value slot. 616 * 617 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 618 * appropriate memory access protection. 619 */ 620 static int 621 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 622 dtrace_vstate_t *vstate) 623 { 624 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 625 626 /* 627 * If we hold the privilege to read from kernel memory, then 628 * everything is readable. 629 */ 630 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 631 return (1); 632 633 /* 634 * You can obviously read that which you can store. 635 */ 636 if (dtrace_canstore(addr, sz, mstate, vstate)) 637 return (1); 638 639 /* 640 * We're allowed to read from our own string table. 641 */ 642 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 643 mstate->dtms_difo->dtdo_strlen)) 644 return (1); 645 646 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 647 *illval = addr; 648 return (0); 649 } 650 651 /* 652 * Convenience routine to check to see if a given string is within a memory 653 * region in which a load may be issued given the user's privilege level; 654 * this exists so that we don't need to issue unnecessary dtrace_strlen() 655 * calls in the event that the user has all privileges. 656 */ 657 static int 658 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 659 dtrace_vstate_t *vstate) 660 { 661 size_t strsz; 662 663 /* 664 * If we hold the privilege to read from kernel memory, then 665 * everything is readable. 666 */ 667 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 668 return (1); 669 670 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 671 if (dtrace_canload(addr, strsz, mstate, vstate)) 672 return (1); 673 674 return (0); 675 } 676 677 /* 678 * Convenience routine to check to see if a given variable is within a memory 679 * region in which a load may be issued given the user's privilege level. 680 */ 681 static int 682 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 683 dtrace_vstate_t *vstate) 684 { 685 size_t sz; 686 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 687 688 /* 689 * If we hold the privilege to read from kernel memory, then 690 * everything is readable. 691 */ 692 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 693 return (1); 694 695 if (type->dtdt_kind == DIF_TYPE_STRING) 696 sz = dtrace_strlen(src, 697 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 698 else 699 sz = type->dtdt_size; 700 701 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 702 } 703 704 /* 705 * Compare two strings using safe loads. 

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
    uint8_t c1, c2;
    volatile uint16_t *flags;

    if (s1 == s2 || limit == 0)
        return (0);

    flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

    do {
        if (s1 == NULL) {
            c1 = '\0';
        } else {
            c1 = dtrace_load8((uintptr_t)s1++);
        }

        if (s2 == NULL) {
            c2 = '\0';
        } else {
            c2 = dtrace_load8((uintptr_t)s2++);
        }

        if (c1 != c2)
            return (c1 - c2);
    } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

    return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * lim parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
    uint_t len;

    for (len = 0; len != lim; len++) {
        if (dtrace_load8((uintptr_t)s++) == '\0')
            break;
    }

    return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
    uintptr_t taddr, tsize;
    int i;

    for (i = 0; i < dtrace_toxranges; i++) {
        taddr = dtrace_toxrange[i].dtt_base;
        tsize = dtrace_toxrange[i].dtt_limit - taddr;

        if (kaddr - taddr < tsize) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
            return (1);
        }

        if (taddr - kaddr < size) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
            cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
            return (1);
        }
    }

    return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
    if (len != 0) {
        uint8_t *s1 = dst;
        const uint8_t *s2 = src;

        if (s1 <= s2) {
            do {
                *s1++ = dtrace_load8((uintptr_t)s2++);
            } while (--len != 0);
        } else {
            s2 += len;
            s1 += len;

            do {
                *--s1 = dtrace_load8((uintptr_t)--s2);
            } while (--len != 0);
        }
    }
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
    if (len != 0) {
        uint8_t *s1 = dst, c;
        const uint8_t *s2 = src;

        do {
            *s1++ = c = dtrace_load8((uintptr_t)s2++);
        } while (--len != 0 && c != '\0');
    }
}
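
/*
 * Worked example (an editorial addition, not part of the original source):
 * the two unsigned comparisons in dtrace_istoxic() above form an
 * overflow-safe overlap test.  Suppose a single toxic range covers
 * [0x2000, 0x3000), i.e. taddr = 0x2000 and tsize = 0x1000; then:
 *
 *	dtrace_istoxic(0x2800, 8)  == 1   access begins inside the range
 *	dtrace_istoxic(0x1ff8, 16) == 1   begins below, extends into it
 *	dtrace_istoxic(0x1000, 8)  == 0   entirely below the range
 *
 * In the first check, (kaddr - taddr) underflows to a huge unsigned value
 * for any kaddr below taddr, so it can never falsely match; the second
 * check symmetrically catches accesses that start below the range but
 * reach into it.
 */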

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
    ASSERT(type->dtdt_flags & DIF_TF_BYREF);

    if (type->dtdt_kind == DIF_TYPE_STRING) {
        dtrace_strcpy(src, dst, type->dtdt_size);
    } else {
        dtrace_bcopy(src, dst, type->dtdt_size);
    }
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
    volatile uint16_t *flags;

    flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

    if (s1 == s2)
        return (0);

    if (s1 == NULL || s2 == NULL)
        return (1);

    if (s1 != s2 && len != 0) {
        const uint8_t *ps1 = s1;
        const uint8_t *ps2 = s2;

        do {
            if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
                return (1);
        } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
    }
    return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
    uchar_t *cp;

    for (cp = dst; len != 0; len--)
        *cp++ = 0;
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
    cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

    /*
     * We should always have a non-NULL state cred here, since if cred
     * is null (anonymous tracing), we fast-path bypass this routine.
     */
    ASSERT(s_cr != NULL);

    if ((cr = CRED()) != NULL &&
        s_cr->cr_uid == cr->cr_uid &&
        s_cr->cr_uid == cr->cr_ruid &&
        s_cr->cr_uid == cr->cr_suid &&
        s_cr->cr_gid == cr->cr_gid &&
        s_cr->cr_gid == cr->cr_rgid &&
        s_cr->cr_gid == cr->cr_sgid)
        return (1);

    return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
    cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

    /*
     * We should always have a non-NULL state cred here, since if cred
     * is null (anonymous tracing), we fast-path bypass this routine.
     */
    ASSERT(s_cr != NULL);

    if ((cr = CRED()) != NULL &&
        s_cr->cr_zone == cr->cr_zone)
        return (1);

    return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd()
{
    proc_t *proc;

    if ((proc = ttoproc(curthread)) != NULL &&
        !(proc->p_flag & SNOCD))
        return (1);

    return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
    int action = state->dts_cred.dcr_action;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
        dtrace_priv_proc_common_zone(state) == 0)
        goto bad;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
        dtrace_priv_proc_common_user(state) == 0)
        goto bad;

    if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
        dtrace_priv_proc_common_nocd() == 0)
        goto bad;

    return (1);

bad:
    cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
        return (1);

    if (dtrace_priv_proc_common_zone(state) &&
        dtrace_priv_proc_common_user(state) &&
        dtrace_priv_proc_common_nocd())
        return (1);

    cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
        return (1);

    cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

    return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
        return (1);

    cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

    return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
    if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
        return (1);

    cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

    return (0);
}
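
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): dtrace_dynvar_clean() and dtrace_dynvar() below repeatedly use
 * one idiom to take ownership of a singly-linked per-CPU list: snapshot
 * the head, then compare-and-swap it aside, retrying if another CPU won
 * the race.  In isolation:
 */
static dtrace_dynvar_t *
dtrace_list_detach_sketch(dtrace_dynvar_t **listp)	/* hypothetical */
{
    dtrace_dynvar_t *head;

    do {
        head = *listp;	/* snapshot the current head */
    } while (dtrace_casptr((void *)listp, head, NULL) != head);

    return (head);	/* the caller now owns the detached list */
}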

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
    dtrace_dynvar_t *dirty;
    dtrace_dstate_percpu_t *dcpu;
    int i, work = 0;

    for (i = 0; i < NCPU; i++) {
        dcpu = &dstate->dtds_percpu[i];

        ASSERT(dcpu->dtdsc_rinsing == NULL);

        /*
         * If the dirty list is NULL, there is no dirty work to do.
         */
        if (dcpu->dtdsc_dirty == NULL)
            continue;

        /*
         * If the clean list is non-NULL, then we're not going to do
         * any work for this CPU -- it means that there has not been
         * a dtrace_dynvar() allocation on this CPU (or from this CPU)
         * since the last time we cleaned house.
         */
        if (dcpu->dtdsc_clean != NULL)
            continue;

        work = 1;

        /*
         * Atomically move the dirty list aside.
         */
        do {
            dirty = dcpu->dtdsc_dirty;

            /*
             * Before we zap the dirty list, set the rinsing list.
             * (This allows for a potential assertion in
             * dtrace_dynvar():  if a free dynamic variable appears
             * on a hash chain, either the dirty list or the
             * rinsing list for some CPU must be non-NULL.)
             */
            dcpu->dtdsc_rinsing = dirty;
            dtrace_membar_producer();
        } while (dtrace_casptr(&dcpu->dtdsc_dirty,
            dirty, NULL) != dirty);
    }

    if (!work) {
        /*
         * We have no work to do; we can simply return.
         */
        return;
    }

    dtrace_sync();

    for (i = 0; i < NCPU; i++) {
        dcpu = &dstate->dtds_percpu[i];

        if (dcpu->dtdsc_rinsing == NULL)
            continue;

        /*
         * We are now guaranteed that no hash chain contains a pointer
         * into this dirty list; we can make it clean.
         */
        ASSERT(dcpu->dtdsc_clean == NULL);
        dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
        dcpu->dtdsc_rinsing = NULL;
    }

    /*
     * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
     * sure that all CPUs have seen all of the dtdsc_clean pointers.
     * This prevents a race whereby a CPU incorrectly decides that
     * the state should be something other than DTRACE_DSTATE_CLEAN
     * after dtrace_dynvar_clean() has completed.
     */
    dtrace_sync();

    dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
    uint64_t hashval = DTRACE_DYNHASH_VALID;
    dtrace_dynhash_t *hash = dstate->dtds_hash;
    dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
    processorid_t me = CPU->cpu_id, cpu = me;
    dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
    size_t bucket, ksize;
    size_t chunksize = dstate->dtds_chunksize;
    uintptr_t kdata, lock, nstate;
    uint_t i;

    ASSERT(nkeys != 0);

    /*
     * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
     * algorithm.  For the by-value portions, we perform the algorithm in
     * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
     * bit, and seems to have only a minute effect on distribution.  For
     * the by-reference data, we perform "One-at-a-time" iterating (safely)
     * over each referenced byte.  It's painful to do this, but it's much
     * better than pathological hash distribution.  The efficacy of the
     * hashing algorithm (and a comparison with other algorithms) may be
     * found by running the ::dtrace_dynstat MDB dcmd.
     */
    for (i = 0; i < nkeys; i++) {
        if (key[i].dttk_size == 0) {
            uint64_t val = key[i].dttk_value;

            hashval += (val >> 48) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += (val >> 32) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += (val >> 16) & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);

            hashval += val & 0xffff;
            hashval += (hashval << 10);
            hashval ^= (hashval >> 6);
        } else {
            /*
             * This is incredibly painful, but it beats the hell
             * out of the alternative.
             */
            uint64_t j, size = key[i].dttk_size;
            uintptr_t base = (uintptr_t)key[i].dttk_value;

            if (!dtrace_canload(base, size, mstate, vstate))
                break;

            for (j = 0; j < size; j++) {
                hashval += dtrace_load8(base + j);
                hashval += (hashval << 10);
                hashval ^= (hashval >> 6);
            }
        }
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
        return (NULL);

    hashval += (hashval << 3);
    hashval ^= (hashval >> 11);
    hashval += (hashval << 15);

    /*
     * There is a remote chance (ideally, 1 in 2^31) that our hashval
     * comes out to be one of our two sentinel hash values.  If this
     * actually happens, we set the hashval to be a value known to be a
     * non-sentinel value.
     */
    if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
        hashval = DTRACE_DYNHASH_VALID;

    /*
     * Yes, it's painful to do a divide here.  If the cycle count becomes
     * important here, tricks can be pulled to reduce it.  (However, it's
     * critical that hash collisions be kept to an absolute minimum;
     * they're much more painful than a divide.)  It's better to have a
     * solution that generates few collisions and still keeps things
     * relatively simple.
     */
    bucket = hashval % dstate->dtds_hashsize;

    if (op == DTRACE_DYNVAR_DEALLOC) {
        volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

        for (;;) {
            while ((lock = *lockp) & 1)
                continue;

            if (dtrace_casptr((void *)lockp,
                (void *)lock, (void *)(lock + 1)) == (void *)lock)
                break;
        }

        dtrace_membar_producer();
    }

top:
    prev = NULL;
    lock = hash[bucket].dtdh_lock;

    dtrace_membar_consumer();

    start = hash[bucket].dtdh_chain;
    ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
        start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
        op != DTRACE_DYNVAR_DEALLOC));

    for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
        dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
        dtrace_key_t *dkey = &dtuple->dtt_key[0];

        if (dvar->dtdv_hashval != hashval) {
            if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
                /*
                 * We've reached the sink, and therefore the
                 * end of the hash chain; we can kick out of
                 * the loop knowing that we have seen a valid
                 * snapshot of state.
                 */
                ASSERT(dvar->dtdv_next == NULL);
                ASSERT(dvar == &dtrace_dynhash_sink);
                break;
            }

            if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
                /*
                 * We've gone off the rails:  somewhere along
                 * the line, one of the members of this hash
                 * chain was deleted.  Note that we could also
                 * detect this by simply letting this loop run
                 * to completion, as we would eventually hit
                 * the end of the dirty list.  However, we
                 * want to avoid running the length of the
                 * dirty list unnecessarily (it might be quite
                 * long), so we catch this as early as
                 * possible by detecting the hash marker.  In
                 * this case, we simply set dvar to NULL and
                 * break; the conditional after the loop will
                 * send us back to top.
                 */
                dvar = NULL;
                break;
            }

            goto next;
        }

        if (dtuple->dtt_nkeys != nkeys)
            goto next;

        for (i = 0; i < nkeys; i++, dkey++) {
            if (dkey->dttk_size != key[i].dttk_size)
                goto next;	/* size or type mismatch */

            if (dkey->dttk_size != 0) {
                if (dtrace_bcmp(
                    (void *)(uintptr_t)key[i].dttk_value,
                    (void *)(uintptr_t)dkey->dttk_value,
                    dkey->dttk_size))
                    goto next;
            } else {
                if (dkey->dttk_value != key[i].dttk_value)
                    goto next;
            }
        }

        if (op != DTRACE_DYNVAR_DEALLOC)
            return (dvar);

        ASSERT(dvar->dtdv_next == NULL ||
            dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

        if (prev != NULL) {
            ASSERT(hash[bucket].dtdh_chain != dvar);
            ASSERT(start != dvar);
            ASSERT(prev->dtdv_next == dvar);
            prev->dtdv_next = dvar->dtdv_next;
        } else {
            if (dtrace_casptr(&hash[bucket].dtdh_chain,
                start, dvar->dtdv_next) != start) {
                /*
                 * We have failed to atomically swing the
                 * hash table head pointer, presumably because
                 * of a conflicting allocation on another CPU.
                 * We need to reread the hash chain and try
                 * again.
                 */
                goto top;
            }
        }

        dtrace_membar_producer();

        /*
         * Now set the hash value to indicate that it's free.
         */
        ASSERT(hash[bucket].dtdh_chain != dvar);
        dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

        dtrace_membar_producer();

        /*
         * Set the next pointer to point at the dirty list, and
         * atomically swing the dirty pointer to the newly freed dvar.
         */
        do {
            next = dcpu->dtdsc_dirty;
            dvar->dtdv_next = next;
        } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

        /*
         * Finally, unlock this hash bucket.
         */
        ASSERT(hash[bucket].dtdh_lock == lock);
        ASSERT(lock & 1);
        hash[bucket].dtdh_lock++;

        return (NULL);
next:
        prev = dvar;
        continue;
    }

    if (dvar == NULL) {
        /*
         * If dvar is NULL, it is because we went off the rails:
         * one of the elements that we traversed in the hash chain
         * was deleted while we were traversing it.  In this case,
         * we assert that we aren't doing a dealloc (deallocs lock
         * the hash bucket to prevent themselves from racing with
         * one another), and retry the hash chain traversal.
         */
        ASSERT(op != DTRACE_DYNVAR_DEALLOC);
        goto top;
    }

    if (op != DTRACE_DYNVAR_ALLOC) {
        /*
         * If we are not to allocate a new variable, we want to
         * return NULL now.  Before we return, check that the value
         * of the lock word hasn't changed.  If it has, we may have
         * seen an inconsistent snapshot.
         */
        if (op == DTRACE_DYNVAR_NOALLOC) {
            if (hash[bucket].dtdh_lock != lock)
                goto top;
        } else {
            ASSERT(op == DTRACE_DYNVAR_DEALLOC);
            ASSERT(hash[bucket].dtdh_lock == lock);
            ASSERT(lock & 1);
            hash[bucket].dtdh_lock++;
        }

        return (NULL);
    }

    /*
     * We need to allocate a new dynamic variable.  The size we need is the
     * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
     * size of any auxiliary key data (rounded up to 8-byte alignment) plus
     * the size of any referred-to data (dsize).  We then round the final
     * size up to the chunksize for allocation.
     */
    for (ksize = 0, i = 0; i < nkeys; i++)
        ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

    /*
     * This should be pretty much impossible, but could happen if, say,
     * strange DIF specified the tuple.  Ideally, this should be an
     * assertion and not an error condition -- but that requires that the
     * chunksize calculation in dtrace_difo_chunksize() be absolutely
     * bullet-proof.  (That is, it must not be able to be fooled by
     * malicious DIF.)  Given the lack of backwards branches in DIF,
     * solving this would presumably not amount to solving the Halting
     * Problem -- but it still seems awfully hard.
     */
    if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
        ksize + dsize > chunksize) {
        dcpu->dtdsc_drops++;
        return (NULL);
    }

    nstate = DTRACE_DSTATE_EMPTY;

    do {
retry:
        free = dcpu->dtdsc_free;

        if (free == NULL) {
            dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
            void *rval;

            if (clean == NULL) {
                /*
                 * We're out of dynamic variable space on
                 * this CPU.  Unless we have tried all CPUs,
                 * we'll try to allocate from a different
                 * CPU.
                 */
                switch (dstate->dtds_state) {
                case DTRACE_DSTATE_CLEAN: {
                    void *sp = &dstate->dtds_state;

                    if (++cpu >= NCPU)
                        cpu = 0;

                    if (dcpu->dtdsc_dirty != NULL &&
                        nstate == DTRACE_DSTATE_EMPTY)
                        nstate = DTRACE_DSTATE_DIRTY;

                    if (dcpu->dtdsc_rinsing != NULL)
                        nstate = DTRACE_DSTATE_RINSING;

                    dcpu = &dstate->dtds_percpu[cpu];

                    if (cpu != me)
                        goto retry;

                    (void) dtrace_cas32(sp,
                        DTRACE_DSTATE_CLEAN, nstate);

                    /*
                     * To increment the correct bean
                     * counter, take another lap.
                     */
                    goto retry;
                }

                case DTRACE_DSTATE_DIRTY:
                    dcpu->dtdsc_dirty_drops++;
                    break;

                case DTRACE_DSTATE_RINSING:
                    dcpu->dtdsc_rinsing_drops++;
                    break;

                case DTRACE_DSTATE_EMPTY:
                    dcpu->dtdsc_drops++;
                    break;
                }

                DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
                return (NULL);
            }

            /*
             * The clean list appears to be non-empty.  We want to
             * move the clean list to the free list; we start by
             * moving the clean pointer aside.
             */
            if (dtrace_casptr(&dcpu->dtdsc_clean,
                clean, NULL) != clean) {
                /*
                 * We are in one of two situations:
                 *
                 *  (a) The clean list was switched to the
                 *      free list by another CPU.
                 *
                 *  (b) The clean list was added to by the
                 *      cleansing cyclic.
                 *
                 * In either of these situations, we can
                 * just reattempt the free list allocation.
                 */
                goto retry;
            }

            ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

            /*
             * Now we'll move the clean list to the free list.
             * It's impossible for this to fail:  the only way
             * the free list can be updated is through this
             * code path, and only one CPU can own the clean list.
             * Thus, it would only be possible for this to fail if
             * this code were racing with dtrace_dynvar_clean().
             * (That is, if dtrace_dynvar_clean() updated the clean
             * list, and we ended up racing to update the free
             * list.)  This race is prevented by the dtrace_sync()
             * in dtrace_dynvar_clean() -- which flushes the
             * owners of the clean lists out before resetting
             * the clean lists.
             */
            rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
            ASSERT(rval == NULL);
            goto retry;
        }

        dvar = free;
        new_free = dvar->dtdv_next;
    } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

    /*
     * We have now allocated a new chunk.  We copy the tuple keys into the
     * tuple array and copy any referenced key data into the data space
     * following the tuple array.  As we do this, we relocate dttk_value
     * in the final tuple to point to the key data address in the chunk.
     */
    kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
    dvar->dtdv_data = (void *)(kdata + ksize);
    dvar->dtdv_tuple.dtt_nkeys = nkeys;

    for (i = 0; i < nkeys; i++) {
        dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
        size_t kesize = key[i].dttk_size;

        if (kesize != 0) {
            dtrace_bcopy(
                (const void *)(uintptr_t)key[i].dttk_value,
                (void *)kdata, kesize);
            dkey->dttk_value = kdata;
            kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
        } else {
            dkey->dttk_value = key[i].dttk_value;
        }

        dkey->dttk_size = kesize;
    }

    ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
    dvar->dtdv_hashval = hashval;
    dvar->dtdv_next = start;

    if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
        return (dvar);

    /*
     * The cas has failed.  Either another CPU is adding an element to
     * this hash chain, or another CPU is deleting an element from this
     * hash chain.  The simplest way to deal with both of these cases
     * (though not necessarily the most efficient) is to free our
     * allocated block and tail-call ourselves.  Note that the free is
     * to the dirty list and _not_ to the free list.  This is to prevent
     * races with allocators, above.
     */
    dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

    dtrace_membar_producer();

    do {
        free = dcpu->dtdsc_dirty;
        dvar->dtdv_next = free;
    } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

    return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
    if (nval < *oval)
        *oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
    if (nval > *oval)
        *oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
    int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
    int64_t val = (int64_t)nval;

    if (val < 0) {
        for (i = 0; i < zero; i++) {
            if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
                quanta[i] += incr;
                return;
            }
        }
    } else {
        for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
            if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
                quanta[i - 1] += incr;
                return;
            }
        }

        quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
        return;
    }

    ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
{
    uint64_t arg = *lquanta++;
    int32_t base = DTRACE_LQUANTIZE_BASE(arg);
    uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
    uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
    int32_t val = (int32_t)nval, level;

    ASSERT(step != 0);
    ASSERT(levels != 0);

    if (val < base) {
        /*
         * This is an underflow.
         */
        lquanta[0] += incr;
        return;
    }

    level = (val - base) / step;

    if (level < levels) {
        lquanta[level + 1] += incr;
        return;
    }

    /*
     * This is an overflow.
     */
    lquanta[levels + 1] += incr;
}

/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
    data[0]++;
    data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
    *oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
    *oval += nval;
}
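
/*
 * Worked example (an editorial addition, not part of the original source):
 * the lquantize() buffer layout assumed by dtrace_aggregate_lquantize()
 * above.  The first word encodes base/step/levels; it is followed by one
 * underflow bucket, one bucket per level, and one overflow bucket.  For
 * example, with base = 0, step = 10, levels = 3:
 *
 *	lquanta[0]	encoded argument (base, step, levels)
 *	lquanta[1]	values < 0		(underflow)
 *	lquanta[2]	values in [0, 10)	(level 0)
 *	lquanta[3]	values in [10, 20)	(level 1)
 *	lquanta[4]	values in [20, 30)	(level 2)
 *	lquanta[5]	values >= 30		(overflow)
 */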

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
    dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
    uint32_t i, ndx, size, fsize;
    uint32_t align = sizeof (uint64_t) - 1;
    dtrace_aggbuffer_t *agb;
    dtrace_aggkey_t *key;
    uint32_t hashval = 0, limit, isstr;
    caddr_t tomax, data, kdata;
    dtrace_actkind_t action;
    dtrace_action_t *act;
    uintptr_t offs;

    if (buf == NULL)
        return;

    if (!agg->dtag_hasarg) {
        /*
         * Currently, only quantize() and lquantize() take additional
         * arguments, and they have the same semantics:  an increment
         * value that defaults to 1 when not present.  If additional
         * aggregating actions take arguments, the setting of the
         * default argument value will presumably have to become more
         * sophisticated...
         */
        arg = 1;
    }

    action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
    size = rec->dtrd_offset - agg->dtag_base;
    fsize = size + rec->dtrd_size;

    ASSERT(dbuf->dtb_tomax != NULL);
    data = dbuf->dtb_tomax + offset + agg->dtag_base;

    if ((tomax = buf->dtb_tomax) == NULL) {
        dtrace_buffer_drop(buf);
        return;
    }

    /*
     * The metastructure is always at the bottom of the buffer.
     */
    agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
        sizeof (dtrace_aggbuffer_t));

    if (buf->dtb_offset == 0) {
        /*
         * We just kludge up approximately 1/8th of the size to be
         * buckets.  If this guess ends up being routinely
         * off-the-mark, we may need to dynamically readjust this
         * based on past performance.
         */
        uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

        if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
            (uintptr_t)tomax || hashsize == 0) {
            /*
             * We've been given a ludicrously small buffer;
             * increment our drop count and leave.
             */
            dtrace_buffer_drop(buf);
            return;
        }

        /*
         * And now, a pathetic attempt to try to get an odd (or
         * perchance, a prime) hash size for better hash distribution.
         */
        if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
            hashsize -= DTRACE_AGGHASHSIZE_SLEW;

        agb->dtagb_hashsize = hashsize;
        agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
            agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
        agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

        for (i = 0; i < agb->dtagb_hashsize; i++)
            agb->dtagb_hash[i] = NULL;
    }

    ASSERT(agg->dtag_first != NULL);
    ASSERT(agg->dtag_first->dta_intuple);

    /*
     * Calculate the hash value based on the key.  Note that we _don't_
     * include the aggid in the hashing (but we will store it as part of
     * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
     * algorithm:  a simple, quick algorithm that has no known funnels, and
     * gets good distribution in practice.  The efficacy of the hashing
     * algorithm (and a comparison with other algorithms) may be found by
     * running the ::dtrace_aggstat MDB dcmd.
     */
1790 */ 1791 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1792 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1793 limit = i + act->dta_rec.dtrd_size; 1794 ASSERT(limit <= size); 1795 isstr = DTRACEACT_ISSTRING(act); 1796 1797 for (; i < limit; i++) { 1798 hashval += data[i]; 1799 hashval += (hashval << 10); 1800 hashval ^= (hashval >> 6); 1801 1802 if (isstr && data[i] == '\0') 1803 break; 1804 } 1805 } 1806 1807 hashval += (hashval << 3); 1808 hashval ^= (hashval >> 11); 1809 hashval += (hashval << 15); 1810 1811 /* 1812 * Yes, the divide here is expensive -- but it's generally the least 1813 * of the performance issues given the amount of data that we iterate 1814 * over to compute hash values, compare data, etc. 1815 */ 1816 ndx = hashval % agb->dtagb_hashsize; 1817 1818 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 1819 ASSERT((caddr_t)key >= tomax); 1820 ASSERT((caddr_t)key < tomax + buf->dtb_size); 1821 1822 if (hashval != key->dtak_hashval || key->dtak_size != size) 1823 continue; 1824 1825 kdata = key->dtak_data; 1826 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 1827 1828 for (act = agg->dtag_first; act->dta_intuple; 1829 act = act->dta_next) { 1830 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1831 limit = i + act->dta_rec.dtrd_size; 1832 ASSERT(limit <= size); 1833 isstr = DTRACEACT_ISSTRING(act); 1834 1835 for (; i < limit; i++) { 1836 if (kdata[i] != data[i]) 1837 goto next; 1838 1839 if (isstr && data[i] == '\0') 1840 break; 1841 } 1842 } 1843 1844 if (action != key->dtak_action) { 1845 /* 1846 * We are aggregating on the same value in the same 1847 * aggregation with two different aggregating actions. 1848 * (This should have been picked up in the compiler, 1849 * so we may be dealing with errant or devious DIF.) 1850 * This is an error condition; we indicate as much, 1851 * and return. 1852 */ 1853 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 1854 return; 1855 } 1856 1857 /* 1858 * This is a hit: we need to apply the aggregator to 1859 * the value at this key. 1860 */ 1861 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 1862 return; 1863 next: 1864 continue; 1865 } 1866 1867 /* 1868 * We didn't find it. We need to allocate some zero-filled space, 1869 * link it into the hash table appropriately, and apply the aggregator 1870 * to the (zero-filled) value. 1871 */ 1872 offs = buf->dtb_offset; 1873 while (offs & (align - 1)) 1874 offs += sizeof (uint32_t); 1875 1876 /* 1877 * If we don't have enough room to both allocate a new key _and_ 1878 * its associated data, increment the drop count and return. 1879 */ 1880 if ((uintptr_t)tomax + offs + fsize > 1881 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 1882 dtrace_buffer_drop(buf); 1883 return; 1884 } 1885 1886 /*CONSTCOND*/ 1887 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 1888 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 1889 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 1890 1891 key->dtak_data = kdata = tomax + offs; 1892 buf->dtb_offset = offs + fsize; 1893 1894 /* 1895 * Now copy the data across. 1896 */ 1897 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 1898 1899 for (i = sizeof (dtrace_aggid_t); i < size; i++) 1900 kdata[i] = data[i]; 1901 1902 /* 1903 * Because strings are not zeroed out by default, we need to iterate 1904 * looking for actions that store strings, and we need to explicitly 1905 * pad these strings out with zeroes. 
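 *
 * (To illustrate why this matters: the byte-for-byte key copy above
 * includes whatever happened to follow a string's terminating NUL in the
 * principal buffer. Zeroing the tail ensures that logically identical
 * keys are stored as byte-identical records -- presumably a property
 * that consumers comparing and merging raw key data depend upon.)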
1906 */ 1907 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1908 int nul; 1909 1910 if (!DTRACEACT_ISSTRING(act)) 1911 continue; 1912 1913 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1914 limit = i + act->dta_rec.dtrd_size; 1915 ASSERT(limit <= size); 1916 1917 for (nul = 0; i < limit; i++) { 1918 if (nul) { 1919 kdata[i] = '\0'; 1920 continue; 1921 } 1922 1923 if (data[i] != '\0') 1924 continue; 1925 1926 nul = 1; 1927 } 1928 } 1929 1930 for (i = size; i < fsize; i++) 1931 kdata[i] = 0; 1932 1933 key->dtak_hashval = hashval; 1934 key->dtak_size = size; 1935 key->dtak_action = action; 1936 key->dtak_next = agb->dtagb_hash[ndx]; 1937 agb->dtagb_hash[ndx] = key; 1938 1939 /* 1940 * Finally, apply the aggregator. 1941 */ 1942 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 1943 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 1944 } 1945 1946 /* 1947 * Given consumer state, this routine finds a speculation in the INACTIVE 1948 * state and transitions it into the ACTIVE state. If there is no speculation 1949 * in the INACTIVE state, 0 is returned. In this case, no error counter is 1950 * incremented -- it is up to the caller to take appropriate action. 1951 */ 1952 static int 1953 dtrace_speculation(dtrace_state_t *state) 1954 { 1955 int i = 0; 1956 dtrace_speculation_state_t current; 1957 uint32_t *stat = &state->dts_speculations_unavail, count; 1958 1959 while (i < state->dts_nspeculations) { 1960 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1961 1962 current = spec->dtsp_state; 1963 1964 if (current != DTRACESPEC_INACTIVE) { 1965 if (current == DTRACESPEC_COMMITTINGMANY || 1966 current == DTRACESPEC_COMMITTING || 1967 current == DTRACESPEC_DISCARDING) 1968 stat = &state->dts_speculations_busy; 1969 i++; 1970 continue; 1971 } 1972 1973 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1974 current, DTRACESPEC_ACTIVE) == current) 1975 return (i + 1); 1976 } 1977 1978 /* 1979 * We couldn't find a speculation. If we found as much as a single 1980 * busy speculation buffer, we'll attribute this failure as "busy" 1981 * instead of "unavail". 1982 */ 1983 do { 1984 count = *stat; 1985 } while (dtrace_cas32(stat, count, count + 1) != count); 1986 1987 return (0); 1988 } 1989 1990 /* 1991 * This routine commits an active speculation. If the specified speculation 1992 * is not in a valid state to perform a commit(), this routine will silently do 1993 * nothing. 
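 * (As a usage illustration -- a minimal D sketch, not taken from this
 * file -- the commit() that lands here typically concludes a sequence
 * like:
 *
 *	syscall::open:entry
 *	{
 *		self->spec = speculation();
 *	}
 *
 *	syscall::open:entry
 *	/self->spec/
 *	{
 *		speculate(self->spec);
 *		printf("%s", copyinstr(arg0));
 *	}
 *
 *	syscall::open:return
 *	/self->spec && errno != 0/
 *	{
 *		commit(self->spec);
 *		self->spec = 0;
 *	}
 *
 *	syscall::open:return
 *	/self->spec && errno == 0/
 *	{
 *		discard(self->spec);
 *		self->spec = 0;
 *	}
 * )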
The state of the specified speculation is transitioned according 1994 * to the state transition diagram outlined in <sys/dtrace_impl.h>. 1995 */ 1996 static void 1997 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 1998 dtrace_specid_t which) 1999 { 2000 dtrace_speculation_t *spec; 2001 dtrace_buffer_t *src, *dest; 2002 uintptr_t daddr, saddr, dlimit; 2003 dtrace_speculation_state_t current, new; 2004 intptr_t offs; 2005 2006 if (which == 0) 2007 return; 2008 2009 if (which > state->dts_nspeculations) { 2010 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2011 return; 2012 } 2013 2014 spec = &state->dts_speculations[which - 1]; 2015 src = &spec->dtsp_buffer[cpu]; 2016 dest = &state->dts_buffer[cpu]; 2017 2018 do { 2019 current = spec->dtsp_state; 2020 2021 if (current == DTRACESPEC_COMMITTINGMANY) 2022 break; 2023 2024 switch (current) { 2025 case DTRACESPEC_INACTIVE: 2026 case DTRACESPEC_DISCARDING: 2027 return; 2028 2029 case DTRACESPEC_COMMITTING: 2030 /* 2031 * This is only possible if we are (a) commit()'ing 2032 * without having done a prior speculate() on this CPU 2033 * and (b) racing with another commit() on a different 2034 * CPU. There's nothing to do -- we just assert that 2035 * our offset is 0. 2036 */ 2037 ASSERT(src->dtb_offset == 0); 2038 return; 2039 2040 case DTRACESPEC_ACTIVE: 2041 new = DTRACESPEC_COMMITTING; 2042 break; 2043 2044 case DTRACESPEC_ACTIVEONE: 2045 /* 2046 * This speculation is active on one CPU. If our 2047 * buffer offset is non-zero, we know that the one CPU 2048 * must be us. Otherwise, we are committing on a 2049 * different CPU from the speculate(), and we must 2050 * rely on being asynchronously cleaned. 2051 */ 2052 if (src->dtb_offset != 0) { 2053 new = DTRACESPEC_COMMITTING; 2054 break; 2055 } 2056 /*FALLTHROUGH*/ 2057 2058 case DTRACESPEC_ACTIVEMANY: 2059 new = DTRACESPEC_COMMITTINGMANY; 2060 break; 2061 2062 default: 2063 ASSERT(0); 2064 } 2065 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2066 current, new) != current); 2067 2068 /* 2069 * We have set the state to indicate that we are committing this 2070 * speculation. Now reserve the necessary space in the destination 2071 * buffer. 2072 */ 2073 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2074 sizeof (uint64_t), state, NULL)) < 0) { 2075 dtrace_buffer_drop(dest); 2076 goto out; 2077 } 2078 2079 /* 2080 * We have the space; copy the buffer across. (Note that this is a 2081 * highly suboptimal bcopy(); in the unlikely event that this becomes 2082 * a serious performance issue, a high-performance DTrace-specific 2083 * bcopy() should obviously be invented.) 2084 */ 2085 daddr = (uintptr_t)dest->dtb_tomax + offs; 2086 dlimit = daddr + src->dtb_offset; 2087 saddr = (uintptr_t)src->dtb_tomax; 2088 2089 /* 2090 * First, the aligned portion. 2091 */ 2092 while (dlimit - daddr >= sizeof (uint64_t)) { 2093 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2094 2095 daddr += sizeof (uint64_t); 2096 saddr += sizeof (uint64_t); 2097 } 2098 2099 /* 2100 * Now any left-over bit... 2101 */ 2102 while (dlimit - daddr) 2103 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2104 2105 /* 2106 * Finally, commit the reserved space in the destination buffer. 2107 */ 2108 dest->dtb_offset = offs + src->dtb_offset; 2109 2110 out: 2111 /* 2112 * If we're lucky enough to be the only active CPU on this speculation 2113 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
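 *
 * (Otherwise -- the COMMITTINGMANY case -- the state is deliberately
 * left alone here: dtrace_speculation_clean(), below, returns the
 * speculation to DTRACESPEC_INACTIVE once every CPU's speculative
 * buffer has been copied out.)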
2114 */ 2115 if (current == DTRACESPEC_ACTIVE || 2116 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2117 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2118 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2119 2120 ASSERT(rval == DTRACESPEC_COMMITTING); 2121 } 2122 2123 src->dtb_offset = 0; 2124 src->dtb_xamot_drops += src->dtb_drops; 2125 src->dtb_drops = 0; 2126 } 2127 2128 /* 2129 * This routine discards an active speculation. If the specified speculation 2130 * is not in a valid state to perform a discard(), this routine will silently 2131 * do nothing. The state of the specified speculation is transitioned 2132 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2133 */ 2134 static void 2135 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2136 dtrace_specid_t which) 2137 { 2138 dtrace_speculation_t *spec; 2139 dtrace_speculation_state_t current, new; 2140 dtrace_buffer_t *buf; 2141 2142 if (which == 0) 2143 return; 2144 2145 if (which > state->dts_nspeculations) { 2146 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2147 return; 2148 } 2149 2150 spec = &state->dts_speculations[which - 1]; 2151 buf = &spec->dtsp_buffer[cpu]; 2152 2153 do { 2154 current = spec->dtsp_state; 2155 2156 switch (current) { 2157 case DTRACESPEC_INACTIVE: 2158 case DTRACESPEC_COMMITTINGMANY: 2159 case DTRACESPEC_COMMITTING: 2160 case DTRACESPEC_DISCARDING: 2161 return; 2162 2163 case DTRACESPEC_ACTIVE: 2164 case DTRACESPEC_ACTIVEMANY: 2165 new = DTRACESPEC_DISCARDING; 2166 break; 2167 2168 case DTRACESPEC_ACTIVEONE: 2169 if (buf->dtb_offset != 0) { 2170 new = DTRACESPEC_INACTIVE; 2171 } else { 2172 new = DTRACESPEC_DISCARDING; 2173 } 2174 break; 2175 2176 default: 2177 ASSERT(0); 2178 } 2179 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2180 current, new) != current); 2181 2182 buf->dtb_offset = 0; 2183 buf->dtb_drops = 0; 2184 } 2185 2186 /* 2187 * Note: not called from probe context. This function is called 2188 * asynchronously from cross call context to clean any speculations that are 2189 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2190 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2191 * speculation. 2192 */ 2193 static void 2194 dtrace_speculation_clean_here(dtrace_state_t *state) 2195 { 2196 dtrace_icookie_t cookie; 2197 processorid_t cpu = CPU->cpu_id; 2198 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2199 dtrace_specid_t i; 2200 2201 cookie = dtrace_interrupt_disable(); 2202 2203 if (dest->dtb_tomax == NULL) { 2204 dtrace_interrupt_enable(cookie); 2205 return; 2206 } 2207 2208 for (i = 0; i < state->dts_nspeculations; i++) { 2209 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2210 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2211 2212 if (src->dtb_tomax == NULL) 2213 continue; 2214 2215 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2216 src->dtb_offset = 0; 2217 continue; 2218 } 2219 2220 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2221 continue; 2222 2223 if (src->dtb_offset == 0) 2224 continue; 2225 2226 dtrace_speculation_commit(state, cpu, i + 1); 2227 } 2228 2229 dtrace_interrupt_enable(cookie); 2230 } 2231 2232 /* 2233 * Note: not called from probe context. This function is called 2234 * asynchronously (and at a regular interval) to clean any speculations that 2235 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2236 * is work to be done, it cross calls all CPUs to perform that work; 2237 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2238 * INACTIVE state until they have been cleaned by all CPUs. 2239 */ 2240 static void 2241 dtrace_speculation_clean(dtrace_state_t *state) 2242 { 2243 int work = 0, rv; 2244 dtrace_specid_t i; 2245 2246 for (i = 0; i < state->dts_nspeculations; i++) { 2247 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2248 2249 ASSERT(!spec->dtsp_cleaning); 2250 2251 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2252 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2253 continue; 2254 2255 work++; 2256 spec->dtsp_cleaning = 1; 2257 } 2258 2259 if (!work) 2260 return; 2261 2262 dtrace_xcall(DTRACE_CPUALL, 2263 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2264 2265 /* 2266 * We now know that all CPUs have committed or discarded their 2267 * speculation buffers, as appropriate. We can now set the state 2268 * to inactive. 2269 */ 2270 for (i = 0; i < state->dts_nspeculations; i++) { 2271 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2272 dtrace_speculation_state_t current, new; 2273 2274 if (!spec->dtsp_cleaning) 2275 continue; 2276 2277 current = spec->dtsp_state; 2278 ASSERT(current == DTRACESPEC_DISCARDING || 2279 current == DTRACESPEC_COMMITTINGMANY); 2280 2281 new = DTRACESPEC_INACTIVE; 2282 2283 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2284 ASSERT(rv == current); 2285 spec->dtsp_cleaning = 0; 2286 } 2287 } 2288 2289 /* 2290 * Called as part of a speculate() to get the speculative buffer associated 2291 * with a given speculation. Returns NULL if the specified speculation is not 2292 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2293 * the active CPU is not the specified CPU -- the speculation will be 2294 * atomically transitioned into the ACTIVEMANY state. 2295 */ 2296 static dtrace_buffer_t * 2297 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2298 dtrace_specid_t which) 2299 { 2300 dtrace_speculation_t *spec; 2301 dtrace_speculation_state_t current, new; 2302 dtrace_buffer_t *buf; 2303 2304 if (which == 0) 2305 return (NULL); 2306 2307 if (which > state->dts_nspeculations) { 2308 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2309 return (NULL); 2310 } 2311 2312 spec = &state->dts_speculations[which - 1]; 2313 buf = &spec->dtsp_buffer[cpuid]; 2314 2315 do { 2316 current = spec->dtsp_state; 2317 2318 switch (current) { 2319 case DTRACESPEC_INACTIVE: 2320 case DTRACESPEC_COMMITTINGMANY: 2321 case DTRACESPEC_DISCARDING: 2322 return (NULL); 2323 2324 case DTRACESPEC_COMMITTING: 2325 ASSERT(buf->dtb_offset == 0); 2326 return (NULL); 2327 2328 case DTRACESPEC_ACTIVEONE: 2329 /* 2330 * This speculation is currently active on one CPU. 2331 * Check the offset in the buffer; if it's non-zero, 2332 * that CPU must be us (and we leave the state alone). 2333 * If it's zero, assume that we're starting on a new 2334 * CPU -- and change the state to indicate that the 2335 * speculation is active on more than one CPU.
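 *
 * (For orientation -- a rough sketch of the transitions effected by
 * this function and by commit()/discard() above; the authoritative
 * diagram is in <sys/dtrace_impl.h>:
 *
 *	INACTIVE -> ACTIVE -> ACTIVEONE -> ACTIVEMANY
 *	ACTIVE, ACTIVEONE -> COMMITTING -> INACTIVE
 *	ACTIVEONE, ACTIVEMANY -> COMMITTINGMANY -> INACTIVE (cleaned)
 *	ACTIVE, ACTIVEONE, ACTIVEMANY -> DISCARDING -> INACTIVE (cleaned)
 * )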
2336 */ 2337 if (buf->dtb_offset != 0) 2338 return (buf); 2339 2340 new = DTRACESPEC_ACTIVEMANY; 2341 break; 2342 2343 case DTRACESPEC_ACTIVEMANY: 2344 return (buf); 2345 2346 case DTRACESPEC_ACTIVE: 2347 new = DTRACESPEC_ACTIVEONE; 2348 break; 2349 2350 default: 2351 ASSERT(0); 2352 } 2353 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2354 current, new) != current); 2355 2356 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2357 return (buf); 2358 } 2359 2360 /* 2361 * Return a string. In the event that the user lacks the privilege to access 2362 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2363 * don't fail access checking. 2364 * 2365 * dtrace_dif_variable() uses this routine as a helper for various 2366 * builtin values such as 'execname' and 'probefunc.' 2367 */ 2368 uintptr_t 2369 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2370 dtrace_mstate_t *mstate) 2371 { 2372 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2373 uintptr_t ret; 2374 size_t strsz; 2375 2376 /* 2377 * The easy case: this probe is allowed to read all of memory, so 2378 * we can just return this as a vanilla pointer. 2379 */ 2380 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2381 return (addr); 2382 2383 /* 2384 * This is the tougher case: we copy the string in question from 2385 * kernel memory into scratch memory and return it that way: this 2386 * ensures that we won't trip up when access checking tests the 2387 * BYREF return value. 2388 */ 2389 strsz = dtrace_strlen((char *)addr, size) + 1; 2390 2391 if (mstate->dtms_scratch_ptr + strsz > 2392 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2393 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2394 return (NULL); 2395 } 2396 2397 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2398 strsz); 2399 ret = mstate->dtms_scratch_ptr; 2400 mstate->dtms_scratch_ptr += strsz; 2401 return (ret); 2402 } 2403 2404 /* 2405 * This function implements the DIF emulator's variable lookups. The emulator 2406 * passes a reserved variable identifier and optional built-in array index. 2407 */ 2408 static uint64_t 2409 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2410 uint64_t ndx) 2411 { 2412 /* 2413 * If we're accessing one of the uncached arguments, we'll turn this 2414 * into a reference in the args array. 2415 */ 2416 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2417 ndx = v - DIF_VAR_ARG0; 2418 v = DIF_VAR_ARGS; 2419 } 2420 2421 switch (v) { 2422 case DIF_VAR_ARGS: 2423 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2424 if (ndx >= sizeof (mstate->dtms_arg) / 2425 sizeof (mstate->dtms_arg[0])) { 2426 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2427 dtrace_provider_t *pv; 2428 uint64_t val; 2429 2430 pv = mstate->dtms_probe->dtpr_provider; 2431 if (pv->dtpv_pops.dtps_getargval != NULL) 2432 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2433 mstate->dtms_probe->dtpr_id, 2434 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2435 else 2436 val = dtrace_getarg(ndx, aframes); 2437 2438 /* 2439 * This is regrettably required to keep the compiler 2440 * from tail-optimizing the call to dtrace_getarg(). 2441 * The condition always evaluates to true, but the 2442 * compiler has no way of figuring that out a priori. 2443 * (None of this would be necessary if the compiler 2444 * could be relied upon to _always_ tail-optimize 2445 * the call to dtrace_getarg() -- but it can't.) 
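 *
 * (Presumably the hazard is that dtrace_getarg() walks the stack
 * using the fixed artificial frame count computed above, and a tail
 * call would remove this function's frame from that walk and skew
 * the count -- our inference, not a statement from the original
 * authors.)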
2446 */ 2447 if (mstate->dtms_probe != NULL) 2448 return (val); 2449 2450 ASSERT(0); 2451 } 2452 2453 return (mstate->dtms_arg[ndx]); 2454 2455 case DIF_VAR_UREGS: { 2456 klwp_t *lwp; 2457 2458 if (!dtrace_priv_proc(state)) 2459 return (0); 2460 2461 if ((lwp = curthread->t_lwp) == NULL) { 2462 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2463 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2464 return (0); 2465 } 2466 2467 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2468 } 2469 2470 case DIF_VAR_CURTHREAD: 2471 if (!dtrace_priv_kernel(state)) 2472 return (0); 2473 return ((uint64_t)(uintptr_t)curthread); 2474 2475 case DIF_VAR_TIMESTAMP: 2476 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2477 mstate->dtms_timestamp = dtrace_gethrtime(); 2478 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2479 } 2480 return (mstate->dtms_timestamp); 2481 2482 case DIF_VAR_VTIMESTAMP: 2483 ASSERT(dtrace_vtime_references != 0); 2484 return (curthread->t_dtrace_vtime); 2485 2486 case DIF_VAR_WALLTIMESTAMP: 2487 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2488 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2489 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2490 } 2491 return (mstate->dtms_walltimestamp); 2492 2493 case DIF_VAR_IPL: 2494 if (!dtrace_priv_kernel(state)) 2495 return (0); 2496 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2497 mstate->dtms_ipl = dtrace_getipl(); 2498 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2499 } 2500 return (mstate->dtms_ipl); 2501 2502 case DIF_VAR_EPID: 2503 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2504 return (mstate->dtms_epid); 2505 2506 case DIF_VAR_ID: 2507 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2508 return (mstate->dtms_probe->dtpr_id); 2509 2510 case DIF_VAR_STACKDEPTH: 2511 if (!dtrace_priv_kernel(state)) 2512 return (0); 2513 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2514 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2515 2516 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2517 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2518 } 2519 return (mstate->dtms_stackdepth); 2520 2521 case DIF_VAR_USTACKDEPTH: 2522 if (!dtrace_priv_proc(state)) 2523 return (0); 2524 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2525 /* 2526 * See comment in DIF_VAR_PID. 2527 */ 2528 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2529 CPU_ON_INTR(CPU)) { 2530 mstate->dtms_ustackdepth = 0; 2531 } else { 2532 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2533 mstate->dtms_ustackdepth = 2534 dtrace_getustackdepth(); 2535 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2536 } 2537 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2538 } 2539 return (mstate->dtms_ustackdepth); 2540 2541 case DIF_VAR_CALLER: 2542 if (!dtrace_priv_kernel(state)) 2543 return (0); 2544 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2545 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2546 2547 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2548 /* 2549 * If this is an unanchored probe, we are 2550 * required to go through the slow path: 2551 * dtrace_caller() only guarantees correct 2552 * results for anchored probes. 2553 */ 2554 pc_t caller[2]; 2555 2556 dtrace_getpcstack(caller, 2, aframes, 2557 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2558 mstate->dtms_caller = caller[1]; 2559 } else if ((mstate->dtms_caller = 2560 dtrace_caller(aframes)) == -1) { 2561 /* 2562 * We have failed to do this the quick way; 2563 * we must resort to the slower approach of 2564 * calling dtrace_getpcstack(). 
2565 */ 2566 pc_t caller; 2567 2568 dtrace_getpcstack(&caller, 1, aframes, NULL); 2569 mstate->dtms_caller = caller; 2570 } 2571 2572 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2573 } 2574 return (mstate->dtms_caller); 2575 2576 case DIF_VAR_UCALLER: 2577 if (!dtrace_priv_proc(state)) 2578 return (0); 2579 2580 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2581 uint64_t ustack[3]; 2582 2583 /* 2584 * dtrace_getupcstack() fills in the first uint64_t 2585 * with the current PID. The second uint64_t will 2586 * be the program counter at user-level. The third 2587 * uint64_t will contain the caller, which is what 2588 * we're after. 2589 */ 2590 ustack[2] = NULL; 2591 dtrace_getupcstack(ustack, 3); 2592 mstate->dtms_ucaller = ustack[2]; 2593 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2594 } 2595 2596 return (mstate->dtms_ucaller); 2597 2598 case DIF_VAR_PROBEPROV: 2599 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2600 return (dtrace_dif_varstr( 2601 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2602 state, mstate)); 2603 2604 case DIF_VAR_PROBEMOD: 2605 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2606 return (dtrace_dif_varstr( 2607 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2608 state, mstate)); 2609 2610 case DIF_VAR_PROBEFUNC: 2611 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2612 return (dtrace_dif_varstr( 2613 (uintptr_t)mstate->dtms_probe->dtpr_func, 2614 state, mstate)); 2615 2616 case DIF_VAR_PROBENAME: 2617 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2618 return (dtrace_dif_varstr( 2619 (uintptr_t)mstate->dtms_probe->dtpr_name, 2620 state, mstate)); 2621 2622 case DIF_VAR_PID: 2623 if (!dtrace_priv_proc(state)) 2624 return (0); 2625 2626 /* 2627 * Note that we are assuming that an unanchored probe is 2628 * always due to a high-level interrupt. (And we're assuming 2629 * that there is only a single high level interrupt.) 2630 */ 2631 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2632 return (pid0.pid_id); 2633 2634 /* 2635 * It is always safe to dereference one's own t_procp pointer: 2636 * it always points to a valid, allocated proc structure. 2637 * Further, it is always safe to dereference the p_pidp member 2638 * of one's own proc structure. (These are truisms because 2639 * threads and processes don't clean up their own state -- 2640 * they leave that task to whomever reaps them.) 2641 */ 2642 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2643 2644 case DIF_VAR_PPID: 2645 if (!dtrace_priv_proc(state)) 2646 return (0); 2647 2648 /* 2649 * See comment in DIF_VAR_PID. 2650 */ 2651 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2652 return (pid0.pid_id); 2653 2654 /* 2655 * It is always safe to dereference one's own t_procp pointer: 2656 * it always points to a valid, allocated proc structure. 2657 * (This is true because threads don't clean up their own 2658 * state -- they leave that task to whomever reaps them.) 2659 */ 2660 return ((uint64_t)curthread->t_procp->p_ppid); 2661 2662 case DIF_VAR_TID: 2663 /* 2664 * See comment in DIF_VAR_PID. 2665 */ 2666 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2667 return (0); 2668 2669 return ((uint64_t)curthread->t_tid); 2670 2671 case DIF_VAR_EXECNAME: 2672 if (!dtrace_priv_proc(state)) 2673 return (0); 2674 2675 /* 2676 * See comment in DIF_VAR_PID.
2677 */ 2678 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2679 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2680 2681 /* 2682 * It is always safe to dereference one's own t_procp pointer: 2683 * it always points to a valid, allocated proc structure. 2684 * (This is true because threads don't clean up their own 2685 * state -- they leave that task to whomever reaps them.) 2686 */ 2687 return (dtrace_dif_varstr( 2688 (uintptr_t)curthread->t_procp->p_user.u_comm, 2689 state, mstate)); 2690 2691 case DIF_VAR_ZONENAME: 2692 if (!dtrace_priv_proc(state)) 2693 return (0); 2694 2695 /* 2696 * See comment in DIF_VAR_PID. 2697 */ 2698 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2699 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2700 2701 /* 2702 * It is always safe to dereference one's own t_procp pointer: 2703 * it always points to a valid, allocated proc structure. 2704 * (This is true because threads don't clean up their own 2705 * state -- they leave that task to whomever reaps them.) 2706 */ 2707 return (dtrace_dif_varstr( 2708 (uintptr_t)curthread->t_procp->p_zone->zone_name, 2709 state, mstate)); 2710 2711 case DIF_VAR_UID: 2712 if (!dtrace_priv_proc(state)) 2713 return (0); 2714 2715 /* 2716 * See comment in DIF_VAR_PID. 2717 */ 2718 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2719 return ((uint64_t)p0.p_cred->cr_uid); 2720 2721 /* 2722 * It is always safe to dereference one's own t_procp pointer: 2723 * it always points to a valid, allocated proc structure. 2724 * (This is true because threads don't clean up their own 2725 * state -- they leave that task to whomever reaps them.) 2726 * 2727 * Additionally, it is safe to dereference one's own process 2728 * credential, since this is never NULL after process birth. 2729 */ 2730 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 2731 2732 case DIF_VAR_GID: 2733 if (!dtrace_priv_proc(state)) 2734 return (0); 2735 2736 /* 2737 * See comment in DIF_VAR_PID. 2738 */ 2739 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2740 return ((uint64_t)p0.p_cred->cr_gid); 2741 2742 /* 2743 * It is always safe to dereference one's own t_procp pointer: 2744 * it always points to a valid, allocated proc structure. 2745 * (This is true because threads don't clean up their own 2746 * state -- they leave that task to whomever reaps them.) 2747 * 2748 * Additionally, it is safe to dereference one's own process 2749 * credential, since this is never NULL after process birth. 2750 */ 2751 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 2752 2753 case DIF_VAR_ERRNO: { 2754 klwp_t *lwp; 2755 if (!dtrace_priv_proc(state)) 2756 return (0); 2757 2758 /* 2759 * See comment in DIF_VAR_PID. 2760 */ 2761 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2762 return (0); 2763 2764 /* 2765 * It is always safe to dereference one's own t_lwp pointer in 2766 * the event that this pointer is non-NULL. (This is true 2767 * because threads and lwps don't clean up their own state -- 2768 * they leave that task to whomever reaps them.) 2769 */ 2770 if ((lwp = curthread->t_lwp) == NULL) 2771 return (0); 2772 2773 return ((uint64_t)lwp->lwp_errno); 2774 } 2775 default: 2776 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2777 return (0); 2778 } 2779 } 2780 2781 /* 2782 * Emulate the execution of DTrace DIF subroutines invoked by the call opcode. 2783 * Notice that we don't bother validating the proper number of arguments or 2784 * their types in the tuple stack.
This isn't needed because all argument 2785 * interpretation is safe because of our load safety -- the worst that can 2786 * happen is that a bogus program can obtain bogus results. 2787 */ 2788 static void 2789 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2790 dtrace_key_t *tupregs, int nargs, 2791 dtrace_mstate_t *mstate, dtrace_state_t *state) 2792 { 2793 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2794 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2795 dtrace_vstate_t *vstate = &state->dts_vstate; 2796 2797 union { 2798 mutex_impl_t mi; 2799 uint64_t mx; 2800 } m; 2801 2802 union { 2803 krwlock_t ri; 2804 uintptr_t rw; 2805 } r; 2806 2807 switch (subr) { 2808 case DIF_SUBR_RAND: 2809 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2810 break; 2811 2812 case DIF_SUBR_MUTEX_OWNED: 2813 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2814 mstate, vstate)) { 2815 regs[rd] = NULL; 2816 break; 2817 } 2818 2819 m.mx = dtrace_load64(tupregs[0].dttk_value); 2820 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2821 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2822 else 2823 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2824 break; 2825 2826 case DIF_SUBR_MUTEX_OWNER: 2827 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2828 mstate, vstate)) { 2829 regs[rd] = NULL; 2830 break; 2831 } 2832 2833 m.mx = dtrace_load64(tupregs[0].dttk_value); 2834 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2835 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2836 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2837 else 2838 regs[rd] = 0; 2839 break; 2840 2841 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2842 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2843 mstate, vstate)) { 2844 regs[rd] = NULL; 2845 break; 2846 } 2847 2848 m.mx = dtrace_load64(tupregs[0].dttk_value); 2849 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2850 break; 2851 2852 case DIF_SUBR_MUTEX_TYPE_SPIN: 2853 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2854 mstate, vstate)) { 2855 regs[rd] = NULL; 2856 break; 2857 } 2858 2859 m.mx = dtrace_load64(tupregs[0].dttk_value); 2860 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2861 break; 2862 2863 case DIF_SUBR_RW_READ_HELD: { 2864 uintptr_t tmp; 2865 2866 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 2867 mstate, vstate)) { 2868 regs[rd] = NULL; 2869 break; 2870 } 2871 2872 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2873 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2874 break; 2875 } 2876 2877 case DIF_SUBR_RW_WRITE_HELD: 2878 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2879 mstate, vstate)) { 2880 regs[rd] = NULL; 2881 break; 2882 } 2883 2884 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2885 regs[rd] = _RW_WRITE_HELD(&r.ri); 2886 break; 2887 2888 case DIF_SUBR_RW_ISWRITER: 2889 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2890 mstate, vstate)) { 2891 regs[rd] = NULL; 2892 break; 2893 } 2894 2895 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2896 regs[rd] = _RW_ISWRITER(&r.ri); 2897 break; 2898 2899 case DIF_SUBR_BCOPY: { 2900 /* 2901 * We need to be sure that the destination is in the scratch 2902 * region -- no other region is allowed. 
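 *
 * (Usage illustration -- a minimal D sketch, not from this file: the
 * destination is typically scratch memory obtained via alloca():
 *
 *	this->buf = (char *)alloca(len);
 *	bcopy(ptr, this->buf, len);
 * )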
2903 */ 2904 uintptr_t src = tupregs[0].dttk_value; 2905 uintptr_t dest = tupregs[1].dttk_value; 2906 size_t size = tupregs[2].dttk_value; 2907 2908 if (!dtrace_inscratch(dest, size, mstate)) { 2909 *flags |= CPU_DTRACE_BADADDR; 2910 *illval = regs[rd]; 2911 break; 2912 } 2913 2914 if (!dtrace_canload(src, size, mstate, vstate)) { 2915 regs[rd] = NULL; 2916 break; 2917 } 2918 2919 dtrace_bcopy((void *)src, (void *)dest, size); 2920 break; 2921 } 2922 2923 case DIF_SUBR_ALLOCA: 2924 case DIF_SUBR_COPYIN: { 2925 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2926 uint64_t size = 2927 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2928 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2929 2930 /* 2931 * This action doesn't require any credential checks since 2932 * probes will not activate in user contexts to which the 2933 * enabling user does not have permissions. 2934 */ 2935 if (mstate->dtms_scratch_ptr + scratch_size > 2936 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2937 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2938 regs[rd] = NULL; 2939 break; 2940 } 2941 2942 if (subr == DIF_SUBR_COPYIN) { 2943 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2944 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2945 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2946 } 2947 2948 mstate->dtms_scratch_ptr += scratch_size; 2949 regs[rd] = dest; 2950 break; 2951 } 2952 2953 case DIF_SUBR_COPYINTO: { 2954 uint64_t size = tupregs[1].dttk_value; 2955 uintptr_t dest = tupregs[2].dttk_value; 2956 2957 /* 2958 * This action doesn't require any credential checks since 2959 * probes will not activate in user contexts to which the 2960 * enabling user does not have permissions. 2961 */ 2962 if (!dtrace_inscratch(dest, size, mstate)) { 2963 *flags |= CPU_DTRACE_BADADDR; 2964 *illval = regs[rd]; 2965 break; 2966 } 2967 2968 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2969 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2970 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2971 break; 2972 } 2973 2974 case DIF_SUBR_COPYINSTR: { 2975 uintptr_t dest = mstate->dtms_scratch_ptr; 2976 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2977 2978 if (nargs > 1 && tupregs[1].dttk_value < size) 2979 size = tupregs[1].dttk_value + 1; 2980 2981 /* 2982 * This action doesn't require any credential checks since 2983 * probes will not activate in user contexts to which the 2984 * enabling user does not have permissions. 
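 *
 * (Usage illustration -- a minimal D sketch, not from this file:
 *
 *	syscall::open:entry
 *	{
 *		printf("%s", copyinstr(arg0));
 *	}
 *
 * arg0 is the user pathname pointer; the string is copied into scratch
 * space and truncated to the "strsize" option if necessary.)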
2985 */ 2986 if (mstate->dtms_scratch_ptr + size > 2987 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2988 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2989 regs[rd] = NULL; 2990 break; 2991 } 2992 2993 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2994 dtrace_copyinstr(tupregs[0].dttk_value, dest, size); 2995 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2996 2997 ((char *)dest)[size - 1] = '\0'; 2998 mstate->dtms_scratch_ptr += size; 2999 regs[rd] = dest; 3000 break; 3001 } 3002 3003 case DIF_SUBR_MSGSIZE: 3004 case DIF_SUBR_MSGDSIZE: { 3005 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3006 uintptr_t wptr, rptr; 3007 size_t count = 0; 3008 int cont = 0; 3009 3010 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3011 3012 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3013 vstate)) { 3014 regs[rd] = NULL; 3015 break; 3016 } 3017 3018 wptr = dtrace_loadptr(baddr + 3019 offsetof(mblk_t, b_wptr)); 3020 3021 rptr = dtrace_loadptr(baddr + 3022 offsetof(mblk_t, b_rptr)); 3023 3024 if (wptr < rptr) { 3025 *flags |= CPU_DTRACE_BADADDR; 3026 *illval = tupregs[0].dttk_value; 3027 break; 3028 } 3029 3030 daddr = dtrace_loadptr(baddr + 3031 offsetof(mblk_t, b_datap)); 3032 3033 baddr = dtrace_loadptr(baddr + 3034 offsetof(mblk_t, b_cont)); 3035 3036 /* 3037 * We want to protect against denial-of-service here, 3038 * so we're only going to search the list for 3039 * dtrace_msgdsize_max mblks. 3040 */ 3041 if (cont++ > dtrace_msgdsize_max) { 3042 *flags |= CPU_DTRACE_ILLOP; 3043 break; 3044 } 3045 3046 if (subr == DIF_SUBR_MSGDSIZE) { 3047 if (dtrace_load8(daddr + 3048 offsetof(dblk_t, db_type)) != M_DATA) 3049 continue; 3050 } 3051 3052 count += wptr - rptr; 3053 } 3054 3055 if (!(*flags & CPU_DTRACE_FAULT)) 3056 regs[rd] = count; 3057 3058 break; 3059 } 3060 3061 case DIF_SUBR_PROGENYOF: { 3062 pid_t pid = tupregs[0].dttk_value; 3063 proc_t *p; 3064 int rval = 0; 3065 3066 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3067 3068 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3069 if (p->p_pidp->pid_id == pid) { 3070 rval = 1; 3071 break; 3072 } 3073 } 3074 3075 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3076 3077 regs[rd] = rval; 3078 break; 3079 } 3080 3081 case DIF_SUBR_SPECULATION: 3082 regs[rd] = dtrace_speculation(state); 3083 break; 3084 3085 case DIF_SUBR_COPYOUT: { 3086 uintptr_t kaddr = tupregs[0].dttk_value; 3087 uintptr_t uaddr = tupregs[1].dttk_value; 3088 uint64_t size = tupregs[2].dttk_value; 3089 3090 if (!dtrace_destructive_disallow && 3091 dtrace_priv_proc_control(state) && 3092 !dtrace_istoxic(kaddr, size)) { 3093 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3094 dtrace_copyout(kaddr, uaddr, size); 3095 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3096 } 3097 break; 3098 } 3099 3100 case DIF_SUBR_COPYOUTSTR: { 3101 uintptr_t kaddr = tupregs[0].dttk_value; 3102 uintptr_t uaddr = tupregs[1].dttk_value; 3103 uint64_t size = tupregs[2].dttk_value; 3104 3105 if (!dtrace_destructive_disallow && 3106 dtrace_priv_proc_control(state) && 3107 !dtrace_istoxic(kaddr, size)) { 3108 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3109 dtrace_copyoutstr(kaddr, uaddr, size); 3110 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3111 } 3112 break; 3113 } 3114 3115 case DIF_SUBR_STRLEN: { 3116 size_t sz; 3117 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3118 sz = dtrace_strlen((char *)addr, 3119 state->dts_options[DTRACEOPT_STRSIZE]); 3120 3121 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3122 regs[rd] = NULL; 3123 break; 3124 } 3125 3126 regs[rd] = sz; 3127 3128 break; 3129 } 3130 3131 case
DIF_SUBR_STRCHR: 3132 case DIF_SUBR_STRRCHR: { 3133 /* 3134 * We're going to iterate over the string looking for the 3135 * specified character. We will iterate until we have reached 3136 * the string length or we have found the character. If this 3137 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3138 * of the specified character instead of the first. 3139 */ 3140 uintptr_t saddr = tupregs[0].dttk_value; 3141 uintptr_t addr = tupregs[0].dttk_value; 3142 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3143 char c, target = (char)tupregs[1].dttk_value; 3144 3145 for (regs[rd] = NULL; addr < limit; addr++) { 3146 if ((c = dtrace_load8(addr)) == target) { 3147 regs[rd] = addr; 3148 3149 if (subr == DIF_SUBR_STRCHR) 3150 break; 3151 } 3152 3153 if (c == '\0') 3154 break; 3155 } 3156 3157 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3158 regs[rd] = NULL; 3159 break; 3160 } 3161 3162 break; 3163 } 3164 3165 case DIF_SUBR_STRSTR: 3166 case DIF_SUBR_INDEX: 3167 case DIF_SUBR_RINDEX: { 3168 /* 3169 * We're going to iterate over the string looking for the 3170 * specified string. We will iterate until we have reached 3171 * the string length or we have found the string. (Yes, this 3172 * is done in the most naive way possible -- but considering 3173 * that the string we're searching for is likely to be 3174 * relatively short, the complexity of Rabin-Karp or similar 3175 * hardly seems merited.) 3176 */ 3177 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3178 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3179 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3180 size_t len = dtrace_strlen(addr, size); 3181 size_t sublen = dtrace_strlen(substr, size); 3182 char *limit = addr + len, *orig = addr; 3183 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3184 int inc = 1; 3185 3186 regs[rd] = notfound; 3187 3188 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3189 regs[rd] = NULL; 3190 break; 3191 } 3192 3193 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3194 vstate)) { 3195 regs[rd] = NULL; 3196 break; 3197 } 3198 3199 /* 3200 * strstr() and index()/rindex() have similar semantics if 3201 * both strings are the empty string: strstr() returns a 3202 * pointer to the (empty) string, and index() and rindex() 3203 * both return index 0 (regardless of any position argument). 3204 */ 3205 if (sublen == 0 && len == 0) { 3206 if (subr == DIF_SUBR_STRSTR) 3207 regs[rd] = (uintptr_t)addr; 3208 else 3209 regs[rd] = 0; 3210 break; 3211 } 3212 3213 if (subr != DIF_SUBR_STRSTR) { 3214 if (subr == DIF_SUBR_RINDEX) { 3215 limit = orig - 1; 3216 addr += len; 3217 inc = -1; 3218 } 3219 3220 /* 3221 * Both index() and rindex() take an optional position 3222 * argument that denotes the starting position. 3223 */ 3224 if (nargs == 3) { 3225 int64_t pos = (int64_t)tupregs[2].dttk_value; 3226 3227 /* 3228 * If the position argument to index() is 3229 * negative, Perl implicitly clamps it at 3230 * zero. This semantic is a little surprising 3231 * given the special meaning of negative 3232 * positions to similar Perl functions like 3233 * substr(), but it appears to reflect a 3234 * notion that index() can start from a 3235 * negative index and increment its way up to 3236 * the string. Given this notion, Perl's 3237 * rindex() is at least self-consistent in 3238 * that it implicitly clamps positions greater 3239 * than the string length to be the string 3240 * length. 
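 *
 * (Concretely, for illustration:
 *
 *	index("dtrace", "race")		=> 2
 *	index("dtrace", "d", 2)		=> -1
 *	rindex("dada", "da")		=> 2
 *	rindex("dtrace", "d", 2)	=> 0
 * )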
Where Perl completely loses 3241 * coherence, however, is when the specified 3242 * substring is the empty string (""). In 3243 * this case, even if the position is 3244 * negative, rindex() returns 0 -- and even if 3245 * the position is greater than the length, 3246 * index() returns the string length. These 3247 * semantics violate the notion that index() 3248 * should never return a value less than the 3249 * specified position and that rindex() should 3250 * never return a value greater than the 3251 * specified position. (One assumes that 3252 * these semantics are artifacts of Perl's 3253 * implementation and not the results of 3254 * deliberate design -- it beggars belief that 3255 * even Larry Wall could desire such oddness.) 3256 * While in the abstract one would wish for 3257 * consistent position semantics across 3258 * substr(), index() and rindex() -- or at the 3259 * very least self-consistent position 3260 * semantics for index() and rindex() -- we 3261 * instead opt to keep with the extant Perl 3262 * semantics, in all their broken glory. (Do 3263 * we have more desire to maintain Perl's 3264 * semantics than Perl does? Probably.) 3265 */ 3266 if (subr == DIF_SUBR_RINDEX) { 3267 if (pos < 0) { 3268 if (sublen == 0) 3269 regs[rd] = 0; 3270 break; 3271 } 3272 3273 if (pos > len) 3274 pos = len; 3275 } else { 3276 if (pos < 0) 3277 pos = 0; 3278 3279 if (pos >= len) { 3280 if (sublen == 0) 3281 regs[rd] = len; 3282 break; 3283 } 3284 } 3285 3286 addr = orig + pos; 3287 } 3288 } 3289 3290 for (regs[rd] = notfound; addr != limit; addr += inc) { 3291 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3292 if (subr != DIF_SUBR_STRSTR) { 3293 /* 3294 * As D index() and rindex() are 3295 * modeled on Perl (and not on awk), 3296 * we return a zero-based (and not a 3297 * one-based) index. (For you Perl 3298 * weenies: no, we're not going to add 3299 * $[ -- and shouldn't you be at a con 3300 * or something?) 3301 */ 3302 regs[rd] = (uintptr_t)(addr - orig); 3303 break; 3304 } 3305 3306 ASSERT(subr == DIF_SUBR_STRSTR); 3307 regs[rd] = (uintptr_t)addr; 3308 break; 3309 } 3310 } 3311 3312 break; 3313 } 3314 3315 case DIF_SUBR_STRTOK: { 3316 uintptr_t addr = tupregs[0].dttk_value; 3317 uintptr_t tokaddr = tupregs[1].dttk_value; 3318 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3319 uintptr_t limit, toklimit = tokaddr + size; 3320 uint8_t c, tokmap[32]; /* 256 / 8 */ 3321 char *dest = (char *)mstate->dtms_scratch_ptr; 3322 int i; 3323 3324 /* 3325 * Check both the token buffer and (later) the input buffer, 3326 * since both could be non-scratch addresses. 3327 */ 3328 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3329 regs[rd] = NULL; 3330 break; 3331 } 3332 3333 if (mstate->dtms_scratch_ptr + size > 3334 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3335 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3336 regs[rd] = NULL; 3337 break; 3338 } 3339 3340 if (addr == NULL) { 3341 /* 3342 * If the address specified is NULL, we use our saved 3343 * strtok pointer from the mstate. Note that this 3344 * means that the saved strtok pointer is _only_ 3345 * valid within multiple enablings of the same probe -- 3346 * it behaves like an implicit clause-local variable. 3347 */ 3348 addr = mstate->dtms_strtok; 3349 } else { 3350 /* 3351 * If the user-specified address is non-NULL we must 3352 * access check it. 
This is the only time we have 3353 * a chance to do so, since this address may reside 3354 * in the string table of this clause-- future calls 3355 * (when we fetch addr from mstate->dtms_strtok) 3356 * would fail this access check. 3357 */ 3358 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3359 regs[rd] = NULL; 3360 break; 3361 } 3362 } 3363 3364 /* 3365 * First, zero the token map, and then process the token 3366 * string -- setting a bit in the map for every character 3367 * found in the token string. 3368 */ 3369 for (i = 0; i < sizeof (tokmap); i++) 3370 tokmap[i] = 0; 3371 3372 for (; tokaddr < toklimit; tokaddr++) { 3373 if ((c = dtrace_load8(tokaddr)) == '\0') 3374 break; 3375 3376 ASSERT((c >> 3) < sizeof (tokmap)); 3377 tokmap[c >> 3] |= (1 << (c & 0x7)); 3378 } 3379 3380 for (limit = addr + size; addr < limit; addr++) { 3381 /* 3382 * We're looking for a character that is _not_ contained 3383 * in the token string. 3384 */ 3385 if ((c = dtrace_load8(addr)) == '\0') 3386 break; 3387 3388 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3389 break; 3390 } 3391 3392 if (c == '\0') { 3393 /* 3394 * We reached the end of the string without finding 3395 * any character that was not in the token string. 3396 * We return NULL in this case, and we set the saved 3397 * address to NULL as well. 3398 */ 3399 regs[rd] = NULL; 3400 mstate->dtms_strtok = NULL; 3401 break; 3402 } 3403 3404 /* 3405 * From here on, we're copying into the destination string. 3406 */ 3407 for (i = 0; addr < limit && i < size - 1; addr++) { 3408 if ((c = dtrace_load8(addr)) == '\0') 3409 break; 3410 3411 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3412 break; 3413 3414 ASSERT(i < size); 3415 dest[i++] = c; 3416 } 3417 3418 ASSERT(i < size); 3419 dest[i] = '\0'; 3420 regs[rd] = (uintptr_t)dest; 3421 mstate->dtms_scratch_ptr += size; 3422 mstate->dtms_strtok = addr; 3423 break; 3424 } 3425 3426 case DIF_SUBR_SUBSTR: { 3427 uintptr_t s = tupregs[0].dttk_value; 3428 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3429 char *d = (char *)mstate->dtms_scratch_ptr; 3430 int64_t index = (int64_t)tupregs[1].dttk_value; 3431 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3432 size_t len = dtrace_strlen((char *)s, size); 3433 int64_t i = 0; 3434 3435 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3436 regs[rd] = NULL; 3437 break; 3438 } 3439 3440 if (nargs <= 2) 3441 remaining = (int64_t)size; 3442 3443 if (mstate->dtms_scratch_ptr + size > 3444 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3445 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3446 regs[rd] = NULL; 3447 break; 3448 } 3449 3450 if (index < 0) { 3451 index += len; 3452 3453 if (index < 0 && index + remaining > 0) { 3454 remaining += index; 3455 index = 0; 3456 } 3457 } 3458 3459 if (index >= len || index < 0) 3460 index = len; 3461 3462 for (d[0] = '\0'; remaining > 0; remaining--) { 3463 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 3464 break; 3465 3466 if (i == size) { 3467 d[i - 1] = '\0'; 3468 break; 3469 } 3470 } 3471 3472 mstate->dtms_scratch_ptr += size; 3473 regs[rd] = (uintptr_t)d; 3474 break; 3475 } 3476 3477 case DIF_SUBR_GETMAJOR: 3478 #ifdef _LP64 3479 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3480 #else 3481 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3482 #endif 3483 break; 3484 3485 case DIF_SUBR_GETMINOR: 3486 #ifdef _LP64 3487 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3488 #else 3489 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3490 #endif 3491 break; 3492 3493 case DIF_SUBR_DDI_PATHNAME: 
{ 3494 /* 3495 * This one is a galactic mess. We are going to roughly 3496 * emulate ddi_pathname(), but it's made more complicated 3497 * by the fact that we (a) want to include the minor name and 3498 * (b) must proceed iteratively instead of recursively. 3499 */ 3500 uintptr_t dest = mstate->dtms_scratch_ptr; 3501 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3502 char *start = (char *)dest, *end = start + size - 1; 3503 uintptr_t daddr = tupregs[0].dttk_value; 3504 int64_t minor = (int64_t)tupregs[1].dttk_value; 3505 char *s; 3506 int i, len, depth = 0; 3507 3508 /* 3509 * Due to all the pointer jumping we do and context we must 3510 * rely upon, we just mandate that the user must have kernel 3511 * read privileges to use this routine. 3512 */ 3513 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3514 *flags |= CPU_DTRACE_KPRIV; 3515 *illval = daddr; 3516 regs[rd] = NULL; 3517 } 3518 3519 if (size == 0 || mstate->dtms_scratch_ptr + size > 3520 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3521 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3522 regs[rd] = NULL; 3523 break; 3524 } 3525 3526 *end = '\0'; 3527 3528 /* 3529 * We want to have a name for the minor. In order to do this, 3530 * we need to walk the minor list from the devinfo. We want 3531 * to be sure that we don't infinitely walk a circular list, 3532 * so we check for circularity by sending a scout pointer 3533 * ahead two elements for every element that we iterate over; 3534 * if the list is circular, these will ultimately point to the 3535 * same element. You may recognize this little trick as the 3536 * answer to a stupid interview question -- one that always 3537 * seems to be asked by those who had to have it laboriously 3538 * explained to them, and who can't even concisely describe 3539 * the conditions under which one would be forced to resort to 3540 * this technique. Needless to say, those conditions are 3541 * found here -- and probably only here. Is this the only 3542 * use of this infamous trick in shipping, production code? 3543 * If it isn't, it probably should be... 3544 */ 3545 if (minor != -1) { 3546 uintptr_t maddr = dtrace_loadptr(daddr + 3547 offsetof(struct dev_info, devi_minor)); 3548 3549 uintptr_t next = offsetof(struct ddi_minor_data, next); 3550 uintptr_t name = offsetof(struct ddi_minor_data, 3551 d_minor) + offsetof(struct ddi_minor, name); 3552 uintptr_t dev = offsetof(struct ddi_minor_data, 3553 d_minor) + offsetof(struct ddi_minor, dev); 3554 uintptr_t scout; 3555 3556 if (maddr != NULL) 3557 scout = dtrace_loadptr(maddr + next); 3558 3559 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3560 uint64_t m; 3561 #ifdef _LP64 3562 m = dtrace_load64(maddr + dev) & MAXMIN64; 3563 #else 3564 m = dtrace_load32(maddr + dev) & MAXMIN; 3565 #endif 3566 if (m != minor) { 3567 maddr = dtrace_loadptr(maddr + next); 3568 3569 if (scout == NULL) 3570 continue; 3571 3572 scout = dtrace_loadptr(scout + next); 3573 3574 if (scout == NULL) 3575 continue; 3576 3577 scout = dtrace_loadptr(scout + next); 3578 3579 if (scout == NULL) 3580 continue; 3581 3582 if (scout == maddr) { 3583 *flags |= CPU_DTRACE_ILLOP; 3584 break; 3585 } 3586 3587 continue; 3588 } 3589 3590 /* 3591 * We have the minor data. Now we need to 3592 * copy the minor's name into the end of the 3593 * pathname.
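 *
 * (For illustration, the result is a /devices-style path of
 * the form
 *
 *	/devices/<node>@<addr>/.../<node>@<addr>:<minor>
 *
 * -- e.g., something like "/devices/pci@1f,0/ide@d/dad@0,0:a"
 * -- built right to left: the minor name first, then each
 * "node@addr" component as we walk the devi_parent pointers,
 * and finally "devices" at the root. The example path here is
 * hypothetical.)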
3594 */ 3595 s = (char *)dtrace_loadptr(maddr + name); 3596 len = dtrace_strlen(s, size); 3597 3598 if (*flags & CPU_DTRACE_FAULT) 3599 break; 3600 3601 if (len != 0) { 3602 if ((end -= (len + 1)) < start) 3603 break; 3604 3605 *end = ':'; 3606 } 3607 3608 for (i = 1; i <= len; i++) 3609 end[i] = dtrace_load8((uintptr_t)s++); 3610 break; 3611 } 3612 } 3613 3614 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3615 ddi_node_state_t devi_state; 3616 3617 devi_state = dtrace_load32(daddr + 3618 offsetof(struct dev_info, devi_node_state)); 3619 3620 if (*flags & CPU_DTRACE_FAULT) 3621 break; 3622 3623 if (devi_state >= DS_INITIALIZED) { 3624 s = (char *)dtrace_loadptr(daddr + 3625 offsetof(struct dev_info, devi_addr)); 3626 len = dtrace_strlen(s, size); 3627 3628 if (*flags & CPU_DTRACE_FAULT) 3629 break; 3630 3631 if (len != 0) { 3632 if ((end -= (len + 1)) < start) 3633 break; 3634 3635 *end = '@'; 3636 } 3637 3638 for (i = 1; i <= len; i++) 3639 end[i] = dtrace_load8((uintptr_t)s++); 3640 } 3641 3642 /* 3643 * Now for the node name... 3644 */ 3645 s = (char *)dtrace_loadptr(daddr + 3646 offsetof(struct dev_info, devi_node_name)); 3647 3648 daddr = dtrace_loadptr(daddr + 3649 offsetof(struct dev_info, devi_parent)); 3650 3651 /* 3652 * If our parent is NULL (that is, if we're the root 3653 * node), we're going to use the special path 3654 * "devices". 3655 */ 3656 if (daddr == NULL) 3657 s = "devices"; 3658 3659 len = dtrace_strlen(s, size); 3660 if (*flags & CPU_DTRACE_FAULT) 3661 break; 3662 3663 if ((end -= (len + 1)) < start) 3664 break; 3665 3666 for (i = 1; i <= len; i++) 3667 end[i] = dtrace_load8((uintptr_t)s++); 3668 *end = '/'; 3669 3670 if (depth++ > dtrace_devdepth_max) { 3671 *flags |= CPU_DTRACE_ILLOP; 3672 break; 3673 } 3674 } 3675 3676 if (end < start) 3677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3678 3679 if (daddr == NULL) { 3680 regs[rd] = (uintptr_t)end; 3681 mstate->dtms_scratch_ptr += size; 3682 } 3683 3684 break; 3685 } 3686 3687 case DIF_SUBR_STRJOIN: { 3688 char *d = (char *)mstate->dtms_scratch_ptr; 3689 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3690 uintptr_t s1 = tupregs[0].dttk_value; 3691 uintptr_t s2 = tupregs[1].dttk_value; 3692 int i = 0; 3693 3694 if (!dtrace_strcanload(s1, size, mstate, vstate) || 3695 !dtrace_strcanload(s2, size, mstate, vstate)) { 3696 regs[rd] = NULL; 3697 break; 3698 } 3699 3700 if (mstate->dtms_scratch_ptr + size > 3701 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3702 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3703 regs[rd] = NULL; 3704 break; 3705 } 3706 3707 for (;;) { 3708 if (i >= size) { 3709 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3710 regs[rd] = NULL; 3711 break; 3712 } 3713 3714 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3715 i--; 3716 break; 3717 } 3718 } 3719 3720 for (;;) { 3721 if (i >= size) { 3722 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3723 regs[rd] = NULL; 3724 break; 3725 } 3726 3727 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3728 break; 3729 } 3730 3731 if (i < size) { 3732 mstate->dtms_scratch_ptr += i; 3733 regs[rd] = (uintptr_t)d; 3734 } 3735 3736 break; 3737 } 3738 3739 case DIF_SUBR_LLTOSTR: { 3740 int64_t i = (int64_t)tupregs[0].dttk_value; 3741 int64_t val = i < 0 ? 
i * -1 : i; 3742 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3743 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3744 3745 if (mstate->dtms_scratch_ptr + size > 3746 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3747 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3748 regs[rd] = NULL; 3749 break; 3750 } 3751 3752 for (*end-- = '\0'; val; val /= 10) 3753 *end-- = '0' + (val % 10); 3754 3755 if (i == 0) 3756 *end-- = '0'; 3757 3758 if (i < 0) 3759 *end-- = '-'; 3760 3761 regs[rd] = (uintptr_t)end + 1; 3762 mstate->dtms_scratch_ptr += size; 3763 break; 3764 } 3765 3766 case DIF_SUBR_HTONS: 3767 case DIF_SUBR_NTOHS: 3768 #ifdef _BIG_ENDIAN 3769 regs[rd] = (uint16_t)tupregs[0].dttk_value; 3770 #else 3771 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 3772 #endif 3773 break; 3774 3775 3776 case DIF_SUBR_HTONL: 3777 case DIF_SUBR_NTOHL: 3778 #ifdef _BIG_ENDIAN 3779 regs[rd] = (uint32_t)tupregs[0].dttk_value; 3780 #else 3781 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 3782 #endif 3783 break; 3784 3785 3786 case DIF_SUBR_HTONLL: 3787 case DIF_SUBR_NTOHLL: 3788 #ifdef _BIG_ENDIAN 3789 regs[rd] = (uint64_t)tupregs[0].dttk_value; 3790 #else 3791 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 3792 #endif 3793 break; 3794 3795 3796 case DIF_SUBR_DIRNAME: 3797 case DIF_SUBR_BASENAME: { 3798 char *dest = (char *)mstate->dtms_scratch_ptr; 3799 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3800 uintptr_t src = tupregs[0].dttk_value; 3801 int i, j, len = dtrace_strlen((char *)src, size); 3802 int lastbase = -1, firstbase = -1, lastdir = -1; 3803 int start, end; 3804 3805 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 3806 regs[rd] = NULL; 3807 break; 3808 } 3809 3810 if (mstate->dtms_scratch_ptr + size > 3811 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3812 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3813 regs[rd] = NULL; 3814 break; 3815 } 3816 3817 /* 3818 * The basename and dirname for a zero-length string are 3819 * defined to be "." 3820 */ 3821 if (len == 0) { 3822 len = 1; 3823 src = (uintptr_t)"."; 3824 } 3825 3826 /* 3827 * Start from the back of the string, moving back toward the 3828 * front until we see a character that isn't a slash. That 3829 * character is the last character in the basename. 3830 */ 3831 for (i = len - 1; i >= 0; i--) { 3832 if (dtrace_load8(src + i) != '/') 3833 break; 3834 } 3835 3836 if (i >= 0) 3837 lastbase = i; 3838 3839 /* 3840 * Starting from the last character in the basename, move 3841 * towards the front until we find a slash. The character 3842 * that we processed immediately before that is the first 3843 * character in the basename. 3844 */ 3845 for (; i >= 0; i--) { 3846 if (dtrace_load8(src + i) == '/') 3847 break; 3848 } 3849 3850 if (i >= 0) 3851 firstbase = i + 1; 3852 3853 /* 3854 * Now keep going until we find a non-slash character. That 3855 * character is the last character in the dirname. 3856 */ 3857 for (; i >= 0; i--) { 3858 if (dtrace_load8(src + i) != '/') 3859 break; 3860 } 3861 3862 if (i >= 0) 3863 lastdir = i; 3864 3865 ASSERT(!(lastbase == -1 && firstbase != -1)); 3866 ASSERT(!(firstbase == -1 && lastdir != -1)); 3867 3868 if (lastbase == -1) { 3869 /* 3870 * We didn't find a non-slash character. We know that 3871 * the length is non-zero, so the whole string must be 3872 * slashes. In either the dirname or the basename 3873 * case, we return '/'.
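 *
 * (Worked examples, for illustration:
 *
 *	src		basename()	dirname()
 *	"/usr/lib/"	"lib"		"/usr"
 *	"/usr"		"usr"		"/"
 *	"usr"		"usr"		"."
 *	"///"		"/"		"/"
 * )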
3874 */ 3875 ASSERT(firstbase == -1); 3876 firstbase = lastbase = lastdir = 0; 3877 } 3878 3879 if (firstbase == -1) { 3880 /* 3881 * The entire string consists only of a basename 3882 * component. If we're looking for dirname, we need 3883 * to change our string to be just "."; if we're 3884 * looking for a basename, we'll just set the first 3885 * character of the basename to be 0. 3886 */ 3887 if (subr == DIF_SUBR_DIRNAME) { 3888 ASSERT(lastdir == -1); 3889 src = (uintptr_t)"."; 3890 lastdir = 0; 3891 } else { 3892 firstbase = 0; 3893 } 3894 } 3895 3896 if (subr == DIF_SUBR_DIRNAME) { 3897 if (lastdir == -1) { 3898 /* 3899 * We know that we have a slash in the name -- 3900 * or lastdir would be set to 0, above. And 3901 * because lastdir is -1, we know that this 3902 * slash must be the first character. (That 3903 * is, the full string must be of the form 3904 * "/basename".) In this case, the last 3905 * character of the directory name is 0. 3906 */ 3907 lastdir = 0; 3908 } 3909 3910 start = 0; 3911 end = lastdir; 3912 } else { 3913 ASSERT(subr == DIF_SUBR_BASENAME); 3914 ASSERT(firstbase != -1 && lastbase != -1); 3915 start = firstbase; 3916 end = lastbase; 3917 } 3918 3919 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3920 dest[j] = dtrace_load8(src + i); 3921 3922 dest[j] = '\0'; 3923 regs[rd] = (uintptr_t)dest; 3924 mstate->dtms_scratch_ptr += size; 3925 break; 3926 } 3927 3928 case DIF_SUBR_CLEANPATH: { 3929 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3930 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3931 uintptr_t src = tupregs[0].dttk_value; 3932 int i = 0, j = 0; 3933 3934 if (!dtrace_strcanload(src, size, mstate, vstate)) { 3935 regs[rd] = NULL; 3936 break; 3937 } 3938 3939 if (mstate->dtms_scratch_ptr + size > 3940 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3941 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3942 regs[rd] = NULL; 3943 break; 3944 } 3945 3946 /* 3947 * Move forward, loading each character. 3948 */ 3949 do { 3950 c = dtrace_load8(src + i++); 3951 next: 3952 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3953 break; 3954 3955 if (c != '/') { 3956 dest[j++] = c; 3957 continue; 3958 } 3959 3960 c = dtrace_load8(src + i++); 3961 3962 if (c == '/') { 3963 /* 3964 * We have two slashes -- we can just advance 3965 * to the next character. 3966 */ 3967 goto next; 3968 } 3969 3970 if (c != '.') { 3971 /* 3972 * This is not "." and it's not ".." -- we can 3973 * just store the "/" and this character and 3974 * drive on. 3975 */ 3976 dest[j++] = '/'; 3977 dest[j++] = c; 3978 continue; 3979 } 3980 3981 c = dtrace_load8(src + i++); 3982 3983 if (c == '/') { 3984 /* 3985 * This is a "/./" component. We're not going 3986 * to store anything in the destination buffer; 3987 * we're just going to go to the next component. 3988 */ 3989 goto next; 3990 } 3991 3992 if (c != '.') { 3993 /* 3994 * This is not ".." -- we can just store the 3995 * "/." and this character and continue 3996 * processing. 3997 */ 3998 dest[j++] = '/'; 3999 dest[j++] = '.'; 4000 dest[j++] = c; 4001 continue; 4002 } 4003 4004 c = dtrace_load8(src + i++); 4005 4006 if (c != '/' && c != '\0') { 4007 /* 4008 * This is not ".." -- it's "..[mumble]". 4009 * We'll store the "/.." and this character 4010 * and continue processing. 4011 */ 4012 dest[j++] = '/'; 4013 dest[j++] = '.'; 4014 dest[j++] = '.'; 4015 dest[j++] = c; 4016 continue; 4017 } 4018 4019 /* 4020 * This is "/../" or "/..\0". We need to back up 4021 * our destination pointer until we find a "/". 
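 * * For example (illustrative, following the logic above): * cleanpath("/foo/../bar") yields "/bar", and cleanpath("/foo/..") * yields "/".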
4022 */ 4023 i--; 4024 while (j != 0 && dest[--j] != '/') 4025 continue; 4026 4027 if (c == '\0') 4028 dest[++j] = '/'; 4029 } while (c != '\0'); 4030 4031 dest[j] = '\0'; 4032 regs[rd] = (uintptr_t)dest; 4033 mstate->dtms_scratch_ptr += size; 4034 break; 4035 } 4036 } 4037 } 4038 4039 /* 4040 * Emulate the execution of DTrace IR instructions specified by the given 4041 * DIF object. This function is deliberately void of assertions as all of 4042 * the necessary checks are handled by a call to dtrace_difo_validate(). 4043 */ 4044 static uint64_t 4045 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4046 dtrace_vstate_t *vstate, dtrace_state_t *state) 4047 { 4048 const dif_instr_t *text = difo->dtdo_buf; 4049 const uint_t textlen = difo->dtdo_len; 4050 const char *strtab = difo->dtdo_strtab; 4051 const uint64_t *inttab = difo->dtdo_inttab; 4052 4053 uint64_t rval = 0; 4054 dtrace_statvar_t *svar; 4055 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4056 dtrace_difv_t *v; 4057 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4058 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4059 4060 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4061 uint64_t regs[DIF_DIR_NREGS]; 4062 uint64_t *tmp; 4063 4064 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4065 int64_t cc_r; 4066 uint_t pc = 0, id, opc; 4067 uint8_t ttop = 0; 4068 dif_instr_t instr; 4069 uint_t r1, r2, rd; 4070 4071 /* 4072 * We stash the current DIF object into the machine state: we need it 4073 * for subsequent access checking. 4074 */ 4075 mstate->dtms_difo = difo; 4076 4077 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4078 4079 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4080 opc = pc; 4081 4082 instr = text[pc++]; 4083 r1 = DIF_INSTR_R1(instr); 4084 r2 = DIF_INSTR_R2(instr); 4085 rd = DIF_INSTR_RD(instr); 4086 4087 switch (DIF_INSTR_OP(instr)) { 4088 case DIF_OP_OR: 4089 regs[rd] = regs[r1] | regs[r2]; 4090 break; 4091 case DIF_OP_XOR: 4092 regs[rd] = regs[r1] ^ regs[r2]; 4093 break; 4094 case DIF_OP_AND: 4095 regs[rd] = regs[r1] & regs[r2]; 4096 break; 4097 case DIF_OP_SLL: 4098 regs[rd] = regs[r1] << regs[r2]; 4099 break; 4100 case DIF_OP_SRL: 4101 regs[rd] = regs[r1] >> regs[r2]; 4102 break; 4103 case DIF_OP_SUB: 4104 regs[rd] = regs[r1] - regs[r2]; 4105 break; 4106 case DIF_OP_ADD: 4107 regs[rd] = regs[r1] + regs[r2]; 4108 break; 4109 case DIF_OP_MUL: 4110 regs[rd] = regs[r1] * regs[r2]; 4111 break; 4112 case DIF_OP_SDIV: 4113 if (regs[r2] == 0) { 4114 regs[rd] = 0; 4115 *flags |= CPU_DTRACE_DIVZERO; 4116 } else { 4117 regs[rd] = (int64_t)regs[r1] / 4118 (int64_t)regs[r2]; 4119 } 4120 break; 4121 4122 case DIF_OP_UDIV: 4123 if (regs[r2] == 0) { 4124 regs[rd] = 0; 4125 *flags |= CPU_DTRACE_DIVZERO; 4126 } else { 4127 regs[rd] = regs[r1] / regs[r2]; 4128 } 4129 break; 4130 4131 case DIF_OP_SREM: 4132 if (regs[r2] == 0) { 4133 regs[rd] = 0; 4134 *flags |= CPU_DTRACE_DIVZERO; 4135 } else { 4136 regs[rd] = (int64_t)regs[r1] % 4137 (int64_t)regs[r2]; 4138 } 4139 break; 4140 4141 case DIF_OP_UREM: 4142 if (regs[r2] == 0) { 4143 regs[rd] = 0; 4144 *flags |= CPU_DTRACE_DIVZERO; 4145 } else { 4146 regs[rd] = regs[r1] % regs[r2]; 4147 } 4148 break; 4149 4150 case DIF_OP_NOT: 4151 regs[rd] = ~regs[r1]; 4152 break; 4153 case DIF_OP_MOV: 4154 regs[rd] = regs[r1]; 4155 break; 4156 case DIF_OP_CMP: 4157 cc_r = regs[r1] - regs[r2]; 4158 cc_n = cc_r < 0; 4159 cc_z = cc_r == 0; 4160 cc_v = 0; 4161 cc_c = regs[r1] < regs[r2]; 4162 break; 4163 case 
DIF_OP_TST: 4164 cc_n = cc_v = cc_c = 0; 4165 cc_z = regs[r1] == 0; 4166 break; 4167 case DIF_OP_BA: 4168 pc = DIF_INSTR_LABEL(instr); 4169 break; 4170 case DIF_OP_BE: 4171 if (cc_z) 4172 pc = DIF_INSTR_LABEL(instr); 4173 break; 4174 case DIF_OP_BNE: 4175 if (cc_z == 0) 4176 pc = DIF_INSTR_LABEL(instr); 4177 break; 4178 case DIF_OP_BG: 4179 if ((cc_z | (cc_n ^ cc_v)) == 0) 4180 pc = DIF_INSTR_LABEL(instr); 4181 break; 4182 case DIF_OP_BGU: 4183 if ((cc_c | cc_z) == 0) 4184 pc = DIF_INSTR_LABEL(instr); 4185 break; 4186 case DIF_OP_BGE: 4187 if ((cc_n ^ cc_v) == 0) 4188 pc = DIF_INSTR_LABEL(instr); 4189 break; 4190 case DIF_OP_BGEU: 4191 if (cc_c == 0) 4192 pc = DIF_INSTR_LABEL(instr); 4193 break; 4194 case DIF_OP_BL: 4195 if (cc_n ^ cc_v) 4196 pc = DIF_INSTR_LABEL(instr); 4197 break; 4198 case DIF_OP_BLU: 4199 if (cc_c) 4200 pc = DIF_INSTR_LABEL(instr); 4201 break; 4202 case DIF_OP_BLE: 4203 if (cc_z | (cc_n ^ cc_v)) 4204 pc = DIF_INSTR_LABEL(instr); 4205 break; 4206 case DIF_OP_BLEU: 4207 if (cc_c | cc_z) 4208 pc = DIF_INSTR_LABEL(instr); 4209 break; 4210 case DIF_OP_RLDSB: 4211 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4212 *flags |= CPU_DTRACE_KPRIV; 4213 *illval = regs[r1]; 4214 break; 4215 } 4216 /*FALLTHROUGH*/ 4217 case DIF_OP_LDSB: 4218 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4219 break; 4220 case DIF_OP_RLDSH: 4221 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4222 *flags |= CPU_DTRACE_KPRIV; 4223 *illval = regs[r1]; 4224 break; 4225 } 4226 /*FALLTHROUGH*/ 4227 case DIF_OP_LDSH: 4228 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4229 break; 4230 case DIF_OP_RLDSW: 4231 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4232 *flags |= CPU_DTRACE_KPRIV; 4233 *illval = regs[r1]; 4234 break; 4235 } 4236 /*FALLTHROUGH*/ 4237 case DIF_OP_LDSW: 4238 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4239 break; 4240 case DIF_OP_RLDUB: 4241 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4242 *flags |= CPU_DTRACE_KPRIV; 4243 *illval = regs[r1]; 4244 break; 4245 } 4246 /*FALLTHROUGH*/ 4247 case DIF_OP_LDUB: 4248 regs[rd] = dtrace_load8(regs[r1]); 4249 break; 4250 case DIF_OP_RLDUH: 4251 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4252 *flags |= CPU_DTRACE_KPRIV; 4253 *illval = regs[r1]; 4254 break; 4255 } 4256 /*FALLTHROUGH*/ 4257 case DIF_OP_LDUH: 4258 regs[rd] = dtrace_load16(regs[r1]); 4259 break; 4260 case DIF_OP_RLDUW: 4261 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4262 *flags |= CPU_DTRACE_KPRIV; 4263 *illval = regs[r1]; 4264 break; 4265 } 4266 /*FALLTHROUGH*/ 4267 case DIF_OP_LDUW: 4268 regs[rd] = dtrace_load32(regs[r1]); 4269 break; 4270 case DIF_OP_RLDX: 4271 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4272 *flags |= CPU_DTRACE_KPRIV; 4273 *illval = regs[r1]; 4274 break; 4275 } 4276 /*FALLTHROUGH*/ 4277 case DIF_OP_LDX: 4278 regs[rd] = dtrace_load64(regs[r1]); 4279 break; 4280 case DIF_OP_ULDSB: 4281 regs[rd] = (int8_t) 4282 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4283 break; 4284 case DIF_OP_ULDSH: 4285 regs[rd] = (int16_t) 4286 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4287 break; 4288 case DIF_OP_ULDSW: 4289 regs[rd] = (int32_t) 4290 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4291 break; 4292 case DIF_OP_ULDUB: 4293 regs[rd] = 4294 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4295 break; 4296 case DIF_OP_ULDUH: 4297 regs[rd] = 4298 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4299 break; 4300 case DIF_OP_ULDUW: 4301 regs[rd] = 4302 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4303 break; 4304 case DIF_OP_ULDX: 4305 
regs[rd] = 4306 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4307 break; 4308 case DIF_OP_RET: 4309 rval = regs[rd]; 4310 break; 4311 case DIF_OP_NOP: 4312 break; 4313 case DIF_OP_SETX: 4314 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4315 break; 4316 case DIF_OP_SETS: 4317 regs[rd] = (uint64_t)(uintptr_t) 4318 (strtab + DIF_INSTR_STRING(instr)); 4319 break; 4320 case DIF_OP_SCMP: { 4321 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 4322 uintptr_t s1 = regs[r1]; 4323 uintptr_t s2 = regs[r2]; 4324 4325 if (s1 != NULL && 4326 !dtrace_strcanload(s1, sz, mstate, vstate)) 4327 break; 4328 if (s2 != NULL && 4329 !dtrace_strcanload(s2, sz, mstate, vstate)) 4330 break; 4331 4332 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 4333 4334 cc_n = cc_r < 0; 4335 cc_z = cc_r == 0; 4336 cc_v = cc_c = 0; 4337 break; 4338 } 4339 case DIF_OP_LDGA: 4340 regs[rd] = dtrace_dif_variable(mstate, state, 4341 r1, regs[r2]); 4342 break; 4343 case DIF_OP_LDGS: 4344 id = DIF_INSTR_VAR(instr); 4345 4346 if (id >= DIF_VAR_OTHER_UBASE) { 4347 uintptr_t a; 4348 4349 id -= DIF_VAR_OTHER_UBASE; 4350 svar = vstate->dtvs_globals[id]; 4351 ASSERT(svar != NULL); 4352 v = &svar->dtsv_var; 4353 4354 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4355 regs[rd] = svar->dtsv_data; 4356 break; 4357 } 4358 4359 a = (uintptr_t)svar->dtsv_data; 4360 4361 if (*(uint8_t *)a == UINT8_MAX) { 4362 /* 4363 * If the 0th byte is set to UINT8_MAX 4364 * then this is to be treated as a 4365 * reference to a NULL variable. 4366 */ 4367 regs[rd] = NULL; 4368 } else { 4369 regs[rd] = a + sizeof (uint64_t); 4370 } 4371 4372 break; 4373 } 4374 4375 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4376 break; 4377 4378 case DIF_OP_STGS: 4379 id = DIF_INSTR_VAR(instr); 4380 4381 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4382 id -= DIF_VAR_OTHER_UBASE; 4383 4384 svar = vstate->dtvs_globals[id]; 4385 ASSERT(svar != NULL); 4386 v = &svar->dtsv_var; 4387 4388 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4389 uintptr_t a = (uintptr_t)svar->dtsv_data; 4390 4391 ASSERT(a != NULL); 4392 ASSERT(svar->dtsv_size != 0); 4393 4394 if (regs[rd] == NULL) { 4395 *(uint8_t *)a = UINT8_MAX; 4396 break; 4397 } else { 4398 *(uint8_t *)a = 0; 4399 a += sizeof (uint64_t); 4400 } 4401 if (!dtrace_vcanload( 4402 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4403 mstate, vstate)) 4404 break; 4405 4406 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4407 (void *)a, &v->dtdv_type); 4408 break; 4409 } 4410 4411 svar->dtsv_data = regs[rd]; 4412 break; 4413 4414 case DIF_OP_LDTA: 4415 /* 4416 * There are no DTrace built-in thread-local arrays at 4417 * present. This opcode is saved for future work. 4418 */ 4419 *flags |= CPU_DTRACE_ILLOP; 4420 regs[rd] = 0; 4421 break; 4422 4423 case DIF_OP_LDLS: 4424 id = DIF_INSTR_VAR(instr); 4425 4426 if (id < DIF_VAR_OTHER_UBASE) { 4427 /* 4428 * For now, this has no meaning. 
4429 */ 4430 regs[rd] = 0; 4431 break; 4432 } 4433 4434 id -= DIF_VAR_OTHER_UBASE; 4435 4436 ASSERT(id < vstate->dtvs_nlocals); 4437 ASSERT(vstate->dtvs_locals != NULL); 4438 4439 svar = vstate->dtvs_locals[id]; 4440 ASSERT(svar != NULL); 4441 v = &svar->dtsv_var; 4442 4443 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4444 uintptr_t a = (uintptr_t)svar->dtsv_data; 4445 size_t sz = v->dtdv_type.dtdt_size; 4446 4447 sz += sizeof (uint64_t); 4448 ASSERT(svar->dtsv_size == NCPU * sz); 4449 a += CPU->cpu_id * sz; 4450 4451 if (*(uint8_t *)a == UINT8_MAX) { 4452 /* 4453 * If the 0th byte is set to UINT8_MAX 4454 * then this is to be treated as a 4455 * reference to a NULL variable. 4456 */ 4457 regs[rd] = NULL; 4458 } else { 4459 regs[rd] = a + sizeof (uint64_t); 4460 } 4461 4462 break; 4463 } 4464 4465 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4466 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4467 regs[rd] = tmp[CPU->cpu_id]; 4468 break; 4469 4470 case DIF_OP_STLS: 4471 id = DIF_INSTR_VAR(instr); 4472 4473 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4474 id -= DIF_VAR_OTHER_UBASE; 4475 ASSERT(id < vstate->dtvs_nlocals); 4476 4477 ASSERT(vstate->dtvs_locals != NULL); 4478 svar = vstate->dtvs_locals[id]; 4479 ASSERT(svar != NULL); 4480 v = &svar->dtsv_var; 4481 4482 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4483 uintptr_t a = (uintptr_t)svar->dtsv_data; 4484 size_t sz = v->dtdv_type.dtdt_size; 4485 4486 sz += sizeof (uint64_t); 4487 ASSERT(svar->dtsv_size == NCPU * sz); 4488 a += CPU->cpu_id * sz; 4489 4490 if (regs[rd] == NULL) { 4491 *(uint8_t *)a = UINT8_MAX; 4492 break; 4493 } else { 4494 *(uint8_t *)a = 0; 4495 a += sizeof (uint64_t); 4496 } 4497 4498 if (!dtrace_vcanload( 4499 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4500 mstate, vstate)) 4501 break; 4502 4503 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4504 (void *)a, &v->dtdv_type); 4505 break; 4506 } 4507 4508 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4509 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4510 tmp[CPU->cpu_id] = regs[rd]; 4511 break; 4512 4513 case DIF_OP_LDTS: { 4514 dtrace_dynvar_t *dvar; 4515 dtrace_key_t *key; 4516 4517 id = DIF_INSTR_VAR(instr); 4518 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4519 id -= DIF_VAR_OTHER_UBASE; 4520 v = &vstate->dtvs_tlocals[id]; 4521 4522 key = &tupregs[DIF_DTR_NREGS]; 4523 key[0].dttk_value = (uint64_t)id; 4524 key[0].dttk_size = 0; 4525 DTRACE_TLS_THRKEY(key[1].dttk_value); 4526 key[1].dttk_size = 0; 4527 4528 dvar = dtrace_dynvar(dstate, 2, key, 4529 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 4530 mstate, vstate); 4531 4532 if (dvar == NULL) { 4533 regs[rd] = 0; 4534 break; 4535 } 4536 4537 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4538 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4539 } else { 4540 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4541 } 4542 4543 break; 4544 } 4545 4546 case DIF_OP_STTS: { 4547 dtrace_dynvar_t *dvar; 4548 dtrace_key_t *key; 4549 4550 id = DIF_INSTR_VAR(instr); 4551 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4552 id -= DIF_VAR_OTHER_UBASE; 4553 4554 key = &tupregs[DIF_DTR_NREGS]; 4555 key[0].dttk_value = (uint64_t)id; 4556 key[0].dttk_size = 0; 4557 DTRACE_TLS_THRKEY(key[1].dttk_value); 4558 key[1].dttk_size = 0; 4559 v = &vstate->dtvs_tlocals[id]; 4560 4561 dvar = dtrace_dynvar(dstate, 2, key, 4562 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4563 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4564 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4565 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4566 4567 /* 4568 * Given that we're storing to thread-local data, 4569 * we need to flush our predicate cache. 4570 */ 4571 curthread->t_predcache = NULL; 4572 4573 if (dvar == NULL) 4574 break; 4575 4576 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4577 if (!dtrace_vcanload( 4578 (void *)(uintptr_t)regs[rd], 4579 &v->dtdv_type, mstate, vstate)) 4580 break; 4581 4582 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4583 dvar->dtdv_data, &v->dtdv_type); 4584 } else { 4585 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4586 } 4587 4588 break; 4589 } 4590 4591 case DIF_OP_SRA: 4592 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 4593 break; 4594 4595 case DIF_OP_CALL: 4596 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 4597 regs, tupregs, ttop, mstate, state); 4598 break; 4599 4600 case DIF_OP_PUSHTR: 4601 if (ttop == DIF_DTR_NREGS) { 4602 *flags |= CPU_DTRACE_TUPOFLOW; 4603 break; 4604 } 4605 4606 if (r1 == DIF_TYPE_STRING) { 4607 /* 4608 * If this is a string type and the size is 0, 4609 * we'll use the system-wide default string 4610 * size. Note that we are _not_ looking at 4611 * the value of the DTRACEOPT_STRSIZE option; 4612 * had this been set, we would expect to have 4613 * a non-zero size value in the "pushtr". 4614 */ 4615 tupregs[ttop].dttk_size = 4616 dtrace_strlen((char *)(uintptr_t)regs[rd], 4617 regs[r2] ? regs[r2] : 4618 dtrace_strsize_default) + 1; 4619 } else { 4620 tupregs[ttop].dttk_size = regs[r2]; 4621 } 4622 4623 tupregs[ttop++].dttk_value = regs[rd]; 4624 break; 4625 4626 case DIF_OP_PUSHTV: 4627 if (ttop == DIF_DTR_NREGS) { 4628 *flags |= CPU_DTRACE_TUPOFLOW; 4629 break; 4630 } 4631 4632 tupregs[ttop].dttk_value = regs[rd]; 4633 tupregs[ttop++].dttk_size = 0; 4634 break; 4635 4636 case DIF_OP_POPTS: 4637 if (ttop != 0) 4638 ttop--; 4639 break; 4640 4641 case DIF_OP_FLUSHTS: 4642 ttop = 0; 4643 break; 4644 4645 case DIF_OP_LDGAA: 4646 case DIF_OP_LDTAA: { 4647 dtrace_dynvar_t *dvar; 4648 dtrace_key_t *key = tupregs; 4649 uint_t nkeys = ttop; 4650 4651 id = DIF_INSTR_VAR(instr); 4652 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4653 id -= DIF_VAR_OTHER_UBASE; 4654 4655 key[nkeys].dttk_value = (uint64_t)id; 4656 key[nkeys++].dttk_size = 0; 4657 4658 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4659 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4660 key[nkeys++].dttk_size = 0; 4661 v = &vstate->dtvs_tlocals[id]; 4662 } else { 4663 v = &vstate->dtvs_globals[id]->dtsv_var; 4664 } 4665 4666 dvar = dtrace_dynvar(dstate, nkeys, key, 4667 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
4668 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4669 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 4670 4671 if (dvar == NULL) { 4672 regs[rd] = 0; 4673 break; 4674 } 4675 4676 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4677 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4678 } else { 4679 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4680 } 4681 4682 break; 4683 } 4684 4685 case DIF_OP_STGAA: 4686 case DIF_OP_STTAA: { 4687 dtrace_dynvar_t *dvar; 4688 dtrace_key_t *key = tupregs; 4689 uint_t nkeys = ttop; 4690 4691 id = DIF_INSTR_VAR(instr); 4692 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4693 id -= DIF_VAR_OTHER_UBASE; 4694 4695 key[nkeys].dttk_value = (uint64_t)id; 4696 key[nkeys++].dttk_size = 0; 4697 4698 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4699 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4700 key[nkeys++].dttk_size = 0; 4701 v = &vstate->dtvs_tlocals[id]; 4702 } else { 4703 v = &vstate->dtvs_globals[id]->dtsv_var; 4704 } 4705 4706 dvar = dtrace_dynvar(dstate, nkeys, key, 4707 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4708 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4709 regs[rd] ? DTRACE_DYNVAR_ALLOC : 4710 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4711 4712 if (dvar == NULL) 4713 break; 4714 4715 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4716 if (!dtrace_vcanload( 4717 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4718 mstate, vstate)) 4719 break; 4720 4721 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4722 dvar->dtdv_data, &v->dtdv_type); 4723 } else { 4724 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4725 } 4726 4727 break; 4728 } 4729 4730 case DIF_OP_ALLOCS: { 4731 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4732 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4733 4734 if (mstate->dtms_scratch_ptr + size > 4735 mstate->dtms_scratch_base + 4736 mstate->dtms_scratch_size) { 4737 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4738 regs[rd] = NULL; 4739 } else { 4740 dtrace_bzero((void *) 4741 mstate->dtms_scratch_ptr, size); 4742 mstate->dtms_scratch_ptr += size; 4743 regs[rd] = ptr; 4744 } 4745 break; 4746 } 4747 4748 case DIF_OP_COPYS: 4749 if (!dtrace_canstore(regs[rd], regs[r2], 4750 mstate, vstate)) { 4751 *flags |= CPU_DTRACE_BADADDR; 4752 *illval = regs[rd]; 4753 break; 4754 } 4755 4756 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 4757 break; 4758 4759 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4760 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4761 break; 4762 4763 case DIF_OP_STB: 4764 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4765 *flags |= CPU_DTRACE_BADADDR; 4766 *illval = regs[rd]; 4767 break; 4768 } 4769 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4770 break; 4771 4772 case DIF_OP_STH: 4773 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4774 *flags |= CPU_DTRACE_BADADDR; 4775 *illval = regs[rd]; 4776 break; 4777 } 4778 if (regs[rd] & 1) { 4779 *flags |= CPU_DTRACE_BADALIGN; 4780 *illval = regs[rd]; 4781 break; 4782 } 4783 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4784 break; 4785 4786 case DIF_OP_STW: 4787 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4788 *flags |= CPU_DTRACE_BADADDR; 4789 *illval = regs[rd]; 4790 break; 4791 } 4792 if (regs[rd] & 3) { 4793 *flags |= CPU_DTRACE_BADALIGN; 4794 *illval = regs[rd]; 4795 break; 4796 } 4797 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4798 break; 4799 4800 case DIF_OP_STX: 4801 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4802 *flags |= CPU_DTRACE_BADADDR; 4803 *illval = regs[rd]; 4804 break; 4805 } 4806 if (regs[rd] & 7) { 4807 
*flags |= CPU_DTRACE_BADALIGN; 4808 *illval = regs[rd]; 4809 break; 4810 } 4811 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4812 break; 4813 } 4814 } 4815 4816 if (!(*flags & CPU_DTRACE_FAULT)) 4817 return (rval); 4818 4819 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4820 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4821 4822 return (0); 4823 } 4824 4825 static void 4826 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4827 { 4828 dtrace_probe_t *probe = ecb->dte_probe; 4829 dtrace_provider_t *prov = probe->dtpr_provider; 4830 char c[DTRACE_FULLNAMELEN + 80], *str; 4831 char *msg = "dtrace: breakpoint action at probe "; 4832 char *ecbmsg = " (ecb "; 4833 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4834 uintptr_t val = (uintptr_t)ecb; 4835 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4836 4837 if (dtrace_destructive_disallow) 4838 return; 4839 4840 /* 4841 * It's impossible to be taking action on the NULL probe. 4842 */ 4843 ASSERT(probe != NULL); 4844 4845 /* 4846 * This is a poor man's (destitute man's?) sprintf(): we want to 4847 * print the provider name, module name, function name and name of 4848 * the probe, along with the hex address of the ECB with the breakpoint 4849 * action -- all of which we must place in the character buffer by 4850 * hand. 4851 */ 4852 while (*msg != '\0') 4853 c[i++] = *msg++; 4854 4855 for (str = prov->dtpv_name; *str != '\0'; str++) 4856 c[i++] = *str; 4857 c[i++] = ':'; 4858 4859 for (str = probe->dtpr_mod; *str != '\0'; str++) 4860 c[i++] = *str; 4861 c[i++] = ':'; 4862 4863 for (str = probe->dtpr_func; *str != '\0'; str++) 4864 c[i++] = *str; 4865 c[i++] = ':'; 4866 4867 for (str = probe->dtpr_name; *str != '\0'; str++) 4868 c[i++] = *str; 4869 4870 while (*ecbmsg != '\0') 4871 c[i++] = *ecbmsg++; 4872 4873 while (shift >= 0) { 4874 mask = (uintptr_t)0xf << shift; 4875 4876 if (val >= ((uintptr_t)1 << shift)) 4877 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4878 shift -= 4; 4879 } 4880 4881 c[i++] = ')'; 4882 c[i] = '\0'; 4883 4884 debug_enter(c); 4885 } 4886 4887 static void 4888 dtrace_action_panic(dtrace_ecb_t *ecb) 4889 { 4890 dtrace_probe_t *probe = ecb->dte_probe; 4891 4892 /* 4893 * It's impossible to be taking action on the NULL probe. 4894 */ 4895 ASSERT(probe != NULL); 4896 4897 if (dtrace_destructive_disallow) 4898 return; 4899 4900 if (dtrace_panicked != NULL) 4901 return; 4902 4903 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4904 return; 4905 4906 /* 4907 * We won the right to panic. (We want to be sure that only one 4908 * thread calls panic() from dtrace_probe(), and that panic() is 4909 * called exactly once.) 4910 */ 4911 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4912 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4913 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4914 } 4915 4916 static void 4917 dtrace_action_raise(uint64_t sig) 4918 { 4919 if (dtrace_destructive_disallow) 4920 return; 4921 4922 if (sig >= NSIG) { 4923 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4924 return; 4925 } 4926 4927 /* 4928 * raise() has a queue depth of 1 -- we ignore all subsequent 4929 * invocations of the raise() action. 
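 * * (That is, if more than one raise() action fires before the * thread next returns to user level, only the first signal is * posted; t_dtrace_sig below holds at most one pending signal.)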
4930 */ 4931 if (curthread->t_dtrace_sig == 0) 4932 curthread->t_dtrace_sig = (uint8_t)sig; 4933 4934 curthread->t_sig_check = 1; 4935 aston(curthread); 4936 } 4937 4938 static void 4939 dtrace_action_stop(void) 4940 { 4941 if (dtrace_destructive_disallow) 4942 return; 4943 4944 if (!curthread->t_dtrace_stop) { 4945 curthread->t_dtrace_stop = 1; 4946 curthread->t_sig_check = 1; 4947 aston(curthread); 4948 } 4949 } 4950 4951 static void 4952 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4953 { 4954 hrtime_t now; 4955 volatile uint16_t *flags; 4956 cpu_t *cpu = CPU; 4957 4958 if (dtrace_destructive_disallow) 4959 return; 4960 4961 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4962 4963 now = dtrace_gethrtime(); 4964 4965 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4966 /* 4967 * We need to advance the mark to the current time. 4968 */ 4969 cpu->cpu_dtrace_chillmark = now; 4970 cpu->cpu_dtrace_chilled = 0; 4971 } 4972 4973 /* 4974 * Now check to see if the requested chill time would take us over 4975 * the maximum amount of time allowed in the chill interval. (Or 4976 * worse, if the calculation itself induces overflow.) 4977 */ 4978 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4979 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4980 *flags |= CPU_DTRACE_ILLOP; 4981 return; 4982 } 4983 4984 while (dtrace_gethrtime() - now < val) 4985 continue; 4986 4987 /* 4988 * Normally, we assure that the value of the variable "timestamp" does 4989 * not change within an ECB. The presence of chill() represents an 4990 * exception to this rule, however. 4991 */ 4992 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 4993 cpu->cpu_dtrace_chilled += val; 4994 } 4995 4996 static void 4997 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 4998 uint64_t *buf, uint64_t arg) 4999 { 5000 int nframes = DTRACE_USTACK_NFRAMES(arg); 5001 int strsize = DTRACE_USTACK_STRSIZE(arg); 5002 uint64_t *pcs = &buf[1], *fps; 5003 char *str = (char *)&pcs[nframes]; 5004 int size, offs = 0, i, j; 5005 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5006 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5007 char *sym; 5008 5009 /* 5010 * Should be taking a faster path if string space has not been 5011 * allocated. 5012 */ 5013 ASSERT(strsize != 0); 5014 5015 /* 5016 * We will first allocate some temporary space for the frame pointers. 5017 */ 5018 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5019 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5020 (nframes * sizeof (uint64_t)); 5021 5022 if (mstate->dtms_scratch_ptr + size > 5023 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 5024 /* 5025 * Not enough room for our frame pointers -- need to indicate 5026 * that we ran out of scratch space. 5027 */ 5028 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5029 return; 5030 } 5031 5032 mstate->dtms_scratch_ptr += size; 5033 saved = mstate->dtms_scratch_ptr; 5034 5035 /* 5036 * Now get a stack with both program counters and frame pointers. 5037 */ 5038 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5039 dtrace_getufpstack(buf, fps, nframes + 1); 5040 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5041 5042 /* 5043 * If that faulted, we're cooked. 5044 */ 5045 if (*flags & CPU_DTRACE_FAULT) 5046 goto out; 5047 5048 /* 5049 * Now we want to walk up the stack, calling the USTACK helper. For 5050 * each iteration, we restore the scratch pointer. 
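 * * (Restoring the scratch pointer on each iteration lets every * helper invocation reuse the same bounded scratch region rather * than consuming it as we walk deeper into the stack.)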
5051 */ 5052 for (i = 0; i < nframes; i++) { 5053 mstate->dtms_scratch_ptr = saved; 5054 5055 if (offs >= strsize) 5056 break; 5057 5058 sym = (char *)(uintptr_t)dtrace_helper( 5059 DTRACE_HELPER_ACTION_USTACK, 5060 mstate, state, pcs[i], fps[i]); 5061 5062 /* 5063 * If we faulted while running the helper, we're going to 5064 * clear the fault and null out the corresponding string. 5065 */ 5066 if (*flags & CPU_DTRACE_FAULT) { 5067 *flags &= ~CPU_DTRACE_FAULT; 5068 str[offs++] = '\0'; 5069 continue; 5070 } 5071 5072 if (sym == NULL) { 5073 str[offs++] = '\0'; 5074 continue; 5075 } 5076 5077 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5078 5079 /* 5080 * Now copy in the string that the helper returned to us. 5081 */ 5082 for (j = 0; offs + j < strsize; j++) { 5083 if ((str[offs + j] = sym[j]) == '\0') 5084 break; 5085 } 5086 5087 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5088 5089 offs += j + 1; 5090 } 5091 5092 if (offs >= strsize) { 5093 /* 5094 * If we didn't have room for all of the strings, we don't 5095 * abort processing -- this needn't be a fatal error -- but we 5096 * still want to increment a counter (dts_stkstroverflows) to 5097 * allow this condition to be warned about. (If this is from 5098 * a jstack() action, it is easily tuned via jstackstrsize.) 5099 */ 5100 dtrace_error(&state->dts_stkstroverflows); 5101 } 5102 5103 while (offs < strsize) 5104 str[offs++] = '\0'; 5105 5106 out: 5107 mstate->dtms_scratch_ptr = old; 5108 } 5109 5110 /* 5111 * If you're looking for the epicenter of DTrace, you just found it. This 5112 * is the function called by the provider to fire a probe -- from which all 5113 * subsequent probe-context DTrace activity emanates. 5114 */ 5115 void 5116 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5117 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5118 { 5119 processorid_t cpuid; 5120 dtrace_icookie_t cookie; 5121 dtrace_probe_t *probe; 5122 dtrace_mstate_t mstate; 5123 dtrace_ecb_t *ecb; 5124 dtrace_action_t *act; 5125 intptr_t offs; 5126 size_t size; 5127 int vtime, onintr; 5128 volatile uint16_t *flags; 5129 hrtime_t now; 5130 5131 /* 5132 * Kick out immediately if this CPU is still being born (in which case 5133 * curthread will be set to -1) 5134 */ 5135 if ((uintptr_t)curthread & 1) 5136 return; 5137 5138 cookie = dtrace_interrupt_disable(); 5139 probe = dtrace_probes[id - 1]; 5140 cpuid = CPU->cpu_id; 5141 onintr = CPU_ON_INTR(CPU); 5142 5143 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5144 probe->dtpr_predcache == curthread->t_predcache) { 5145 /* 5146 * We have hit in the predicate cache; we know that 5147 * this predicate would evaluate to be false. 5148 */ 5149 dtrace_interrupt_enable(cookie); 5150 return; 5151 } 5152 5153 if (panic_quiesce) { 5154 /* 5155 * We don't trace anything if we're panicking. 
5156 */ 5157 dtrace_interrupt_enable(cookie); 5158 return; 5159 } 5160 5161 now = dtrace_gethrtime(); 5162 vtime = dtrace_vtime_references != 0; 5163 5164 if (vtime && curthread->t_dtrace_start) 5165 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5166 5167 mstate.dtms_difo = NULL; 5168 mstate.dtms_probe = probe; 5169 mstate.dtms_strtok = NULL; 5170 mstate.dtms_arg[0] = arg0; 5171 mstate.dtms_arg[1] = arg1; 5172 mstate.dtms_arg[2] = arg2; 5173 mstate.dtms_arg[3] = arg3; 5174 mstate.dtms_arg[4] = arg4; 5175 5176 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5177 5178 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5179 dtrace_predicate_t *pred = ecb->dte_predicate; 5180 dtrace_state_t *state = ecb->dte_state; 5181 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5182 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5183 dtrace_vstate_t *vstate = &state->dts_vstate; 5184 dtrace_provider_t *prov = probe->dtpr_provider; 5185 int committed = 0; 5186 caddr_t tomax; 5187 5188 /* 5189 * A little subtlety with the following (seemingly innocuous) 5190 * declaration of the automatic 'val': by looking at the 5191 * code, you might think that it could be declared in the 5192 * action processing loop, below. (That is, it's only used in 5193 * the action processing loop.) However, it must be declared 5194 * out of that scope because in the case of DIF expression 5195 * arguments to aggregating actions, one iteration of the 5196 * action loop will use the last iteration's value. 5197 */ 5198 #ifdef lint 5199 uint64_t val = 0; 5200 #else 5201 uint64_t val; 5202 #endif 5203 5204 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5205 *flags &= ~CPU_DTRACE_ERROR; 5206 5207 if (prov == dtrace_provider) { 5208 /* 5209 * If dtrace itself is the provider of this probe, 5210 * we're only going to continue processing the ECB if 5211 * arg0 (the dtrace_state_t) is equal to the ECB's 5212 * creating state. (This prevents disjoint consumers 5213 * from seeing one another's metaprobes.) 5214 */ 5215 if (arg0 != (uint64_t)(uintptr_t)state) 5216 continue; 5217 } 5218 5219 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5220 /* 5221 * We're not currently active. If our provider isn't 5222 * the dtrace pseudo provider, we're not interested. 5223 */ 5224 if (prov != dtrace_provider) 5225 continue; 5226 5227 /* 5228 * Now we must further check if we are in the BEGIN 5229 * probe. If we are, we will only continue processing 5230 * if we're still in WARMUP -- if one BEGIN enabling 5231 * has invoked the exit() action, we don't want to 5232 * evaluate subsequent BEGIN enablings. 5233 */ 5234 if (probe->dtpr_id == dtrace_probeid_begin && 5235 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5236 ASSERT(state->dts_activity == 5237 DTRACE_ACTIVITY_DRAINING); 5238 continue; 5239 } 5240 } 5241 5242 if (ecb->dte_cond) { 5243 /* 5244 * If the dte_cond bits indicate that this 5245 * consumer is only allowed to see user-mode firings 5246 * of this probe, call the provider's dtps_usermode() 5247 * entry point to check that the probe was fired 5248 * while in a user context. Skip this ECB if that's 5249 * not the case. 5250 */ 5251 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5252 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5253 probe->dtpr_id, probe->dtpr_arg) == 0) 5254 continue; 5255 5256 /* 5257 * This is more subtle than it looks. 
We have to be 5258 * absolutely certain that CRED() isn't going to 5259 * change out from under us so it's only legit to 5260 * examine that structure if we're in constrained 5261 * situations. Currently, the only time we'll perform this 5262 * check is if a non-super-user has enabled the 5263 * profile or syscall providers -- providers that 5264 * allow visibility of all processes. For the 5265 * profile case, the check above will ensure that 5266 * we're examining a user context. 5267 */ 5268 if (ecb->dte_cond & DTRACE_COND_OWNER) { 5269 cred_t *cr; 5270 cred_t *s_cr = 5271 ecb->dte_state->dts_cred.dcr_cred; 5272 proc_t *proc; 5273 5274 ASSERT(s_cr != NULL); 5275 5276 if ((cr = CRED()) == NULL || 5277 s_cr->cr_uid != cr->cr_uid || 5278 s_cr->cr_uid != cr->cr_ruid || 5279 s_cr->cr_uid != cr->cr_suid || 5280 s_cr->cr_gid != cr->cr_gid || 5281 s_cr->cr_gid != cr->cr_rgid || 5282 s_cr->cr_gid != cr->cr_sgid || 5283 (proc = ttoproc(curthread)) == NULL || 5284 (proc->p_flag & SNOCD)) 5285 continue; 5286 } 5287 5288 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 5289 cred_t *cr; 5290 cred_t *s_cr = 5291 ecb->dte_state->dts_cred.dcr_cred; 5292 5293 ASSERT(s_cr != NULL); 5294 5295 if ((cr = CRED()) == NULL || 5296 s_cr->cr_zone->zone_id != 5297 cr->cr_zone->zone_id) 5298 continue; 5299 } 5300 } 5301 5302 if (now - state->dts_alive > dtrace_deadman_timeout) { 5303 /* 5304 * We seem to be dead. Unless we (a) have kernel 5305 * destructive permissions (b) have explicitly enabled 5306 * destructive actions and (c) destructive actions have 5307 * not been disabled, we're going to transition into 5308 * the KILLED state, from which no further processing 5309 * on this state will be performed. 5310 */ 5311 if (!dtrace_priv_kernel_destructive(state) || 5312 !state->dts_cred.dcr_destructive || 5313 dtrace_destructive_disallow) { 5314 void *activity = &state->dts_activity; 5315 dtrace_activity_t current; 5316 5317 do { 5318 current = state->dts_activity; 5319 } while (dtrace_cas32(activity, current, 5320 DTRACE_ACTIVITY_KILLED) != current); 5321 5322 continue; 5323 } 5324 } 5325 5326 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 5327 ecb->dte_alignment, state, &mstate)) < 0) 5328 continue; 5329 5330 tomax = buf->dtb_tomax; 5331 ASSERT(tomax != NULL); 5332 5333 if (ecb->dte_size != 0) 5334 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5335 5336 mstate.dtms_epid = ecb->dte_epid; 5337 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5338 5339 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 5340 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 5341 else 5342 mstate.dtms_access = 0; 5343 5344 if (pred != NULL) { 5345 dtrace_difo_t *dp = pred->dtp_difo; 5346 int rval; 5347 5348 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5349 5350 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5351 dtrace_cacheid_t cid = probe->dtpr_predcache; 5352 5353 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5354 /* 5355 * Update the predicate cache...
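 * * Stashing dtp_cacheid in t_predcache allows the check at the * top of dtrace_probe() to dismiss subsequent firings on this * thread without evaluating the predicate again.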
5356 */ 5357 ASSERT(cid == pred->dtp_cacheid); 5358 curthread->t_predcache = cid; 5359 } 5360 5361 continue; 5362 } 5363 } 5364 5365 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5366 act != NULL; act = act->dta_next) { 5367 size_t valoffs; 5368 dtrace_difo_t *dp; 5369 dtrace_recdesc_t *rec = &act->dta_rec; 5370 5371 size = rec->dtrd_size; 5372 valoffs = offs + rec->dtrd_offset; 5373 5374 if (DTRACEACT_ISAGG(act->dta_kind)) { 5375 uint64_t v = 0xbad; 5376 dtrace_aggregation_t *agg; 5377 5378 agg = (dtrace_aggregation_t *)act; 5379 5380 if ((dp = act->dta_difo) != NULL) 5381 v = dtrace_dif_emulate(dp, 5382 &mstate, vstate, state); 5383 5384 if (*flags & CPU_DTRACE_ERROR) 5385 continue; 5386 5387 /* 5388 * Note that we always pass the expression 5389 * value from the previous iteration of the 5390 * action loop. This value will only be used 5391 * if there is an expression argument to the 5392 * aggregating action, denoted by the 5393 * dtag_hasarg field. 5394 */ 5395 dtrace_aggregate(agg, buf, 5396 offs, aggbuf, v, val); 5397 continue; 5398 } 5399 5400 switch (act->dta_kind) { 5401 case DTRACEACT_STOP: 5402 if (dtrace_priv_proc_destructive(state)) 5403 dtrace_action_stop(); 5404 continue; 5405 5406 case DTRACEACT_BREAKPOINT: 5407 if (dtrace_priv_kernel_destructive(state)) 5408 dtrace_action_breakpoint(ecb); 5409 continue; 5410 5411 case DTRACEACT_PANIC: 5412 if (dtrace_priv_kernel_destructive(state)) 5413 dtrace_action_panic(ecb); 5414 continue; 5415 5416 case DTRACEACT_STACK: 5417 if (!dtrace_priv_kernel(state)) 5418 continue; 5419 5420 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5421 size / sizeof (pc_t), probe->dtpr_aframes, 5422 DTRACE_ANCHORED(probe) ? NULL : 5423 (uint32_t *)arg0); 5424 5425 continue; 5426 5427 case DTRACEACT_JSTACK: 5428 case DTRACEACT_USTACK: 5429 if (!dtrace_priv_proc(state)) 5430 continue; 5431 5432 /* 5433 * See comment in DIF_VAR_PID. 5434 */ 5435 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5436 CPU_ON_INTR(CPU)) { 5437 int depth = DTRACE_USTACK_NFRAMES( 5438 rec->dtrd_arg) + 1; 5439 5440 dtrace_bzero((void *)(tomax + valoffs), 5441 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5442 + depth * sizeof (uint64_t)); 5443 5444 continue; 5445 } 5446 5447 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5448 curproc->p_dtrace_helpers != NULL) { 5449 /* 5450 * This is the slow path -- we have 5451 * allocated string space, and we're 5452 * getting the stack of a process that 5453 * has helpers. Call into a separate 5454 * routine to perform this processing. 
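 * * A hypothetical enabling that would take this path (an * illustrative sketch only; the probe, process name and sizes * are examples, not requirements): * *	profile-97 *	/execname == "java"/ *	{ *		jstack(50, 500); *	} * * Here jstack() allocates string space for each frame, and the * helper machinery invoked from dtrace_action_ustack() annotates * each frame for a process (such as a JVM) that has registered * helpers.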
5455 */ 5456 dtrace_action_ustack(&mstate, state, 5457 (uint64_t *)(tomax + valoffs), 5458 rec->dtrd_arg); 5459 continue; 5460 } 5461 5462 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5463 dtrace_getupcstack((uint64_t *) 5464 (tomax + valoffs), 5465 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5466 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5467 continue; 5468 5469 default: 5470 break; 5471 } 5472 5473 dp = act->dta_difo; 5474 ASSERT(dp != NULL); 5475 5476 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5477 5478 if (*flags & CPU_DTRACE_ERROR) 5479 continue; 5480 5481 switch (act->dta_kind) { 5482 case DTRACEACT_SPECULATE: 5483 ASSERT(buf == &state->dts_buffer[cpuid]); 5484 buf = dtrace_speculation_buffer(state, 5485 cpuid, val); 5486 5487 if (buf == NULL) { 5488 *flags |= CPU_DTRACE_DROP; 5489 continue; 5490 } 5491 5492 offs = dtrace_buffer_reserve(buf, 5493 ecb->dte_needed, ecb->dte_alignment, 5494 state, NULL); 5495 5496 if (offs < 0) { 5497 *flags |= CPU_DTRACE_DROP; 5498 continue; 5499 } 5500 5501 tomax = buf->dtb_tomax; 5502 ASSERT(tomax != NULL); 5503 5504 if (ecb->dte_size != 0) 5505 DTRACE_STORE(uint32_t, tomax, offs, 5506 ecb->dte_epid); 5507 continue; 5508 5509 case DTRACEACT_CHILL: 5510 if (dtrace_priv_kernel_destructive(state)) 5511 dtrace_action_chill(&mstate, val); 5512 continue; 5513 5514 case DTRACEACT_RAISE: 5515 if (dtrace_priv_proc_destructive(state)) 5516 dtrace_action_raise(val); 5517 continue; 5518 5519 case DTRACEACT_COMMIT: 5520 ASSERT(!committed); 5521 5522 /* 5523 * We need to commit our buffer state. 5524 */ 5525 if (ecb->dte_size) 5526 buf->dtb_offset = offs + ecb->dte_size; 5527 buf = &state->dts_buffer[cpuid]; 5528 dtrace_speculation_commit(state, cpuid, val); 5529 committed = 1; 5530 continue; 5531 5532 case DTRACEACT_DISCARD: 5533 dtrace_speculation_discard(state, cpuid, val); 5534 continue; 5535 5536 case DTRACEACT_DIFEXPR: 5537 case DTRACEACT_LIBACT: 5538 case DTRACEACT_PRINTF: 5539 case DTRACEACT_PRINTA: 5540 case DTRACEACT_SYSTEM: 5541 case DTRACEACT_FREOPEN: 5542 break; 5543 5544 case DTRACEACT_SYM: 5545 case DTRACEACT_MOD: 5546 if (!dtrace_priv_kernel(state)) 5547 continue; 5548 break; 5549 5550 case DTRACEACT_USYM: 5551 case DTRACEACT_UMOD: 5552 case DTRACEACT_UADDR: { 5553 struct pid *pid = curthread->t_procp->p_pidp; 5554 5555 if (!dtrace_priv_proc(state)) 5556 continue; 5557 5558 DTRACE_STORE(uint64_t, tomax, 5559 valoffs, (uint64_t)pid->pid_id); 5560 DTRACE_STORE(uint64_t, tomax, 5561 valoffs + sizeof (uint64_t), val); 5562 5563 continue; 5564 } 5565 5566 case DTRACEACT_EXIT: { 5567 /* 5568 * For the exit action, we are going to attempt 5569 * to atomically set our activity to be 5570 * draining. If this fails (either because 5571 * another CPU has beat us to the exit action, 5572 * or because our current activity is something 5573 * other than ACTIVE or WARMUP), we will 5574 * continue. This assures that the exit action 5575 * can be successfully recorded at most once 5576 * when we're in the ACTIVE state. If we're 5577 * encountering the exit() action while in 5578 * COOLDOWN, however, we want to honor the new 5579 * status code. (We know that we're the only 5580 * thread in COOLDOWN, so there is no race.) 
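 * * (An illustrative enabling that lands here: * *	tick-5s *	{ *		exit(0); *	} * * where the status code is reported back to the consumer.)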
5581 */ 5582 void *activity = &state->dts_activity; 5583 dtrace_activity_t current = state->dts_activity; 5584 5585 if (current == DTRACE_ACTIVITY_COOLDOWN) 5586 break; 5587 5588 if (current != DTRACE_ACTIVITY_WARMUP) 5589 current = DTRACE_ACTIVITY_ACTIVE; 5590 5591 if (dtrace_cas32(activity, current, 5592 DTRACE_ACTIVITY_DRAINING) != current) { 5593 *flags |= CPU_DTRACE_DROP; 5594 continue; 5595 } 5596 5597 break; 5598 } 5599 5600 default: 5601 ASSERT(0); 5602 } 5603 5604 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 5605 uintptr_t end = valoffs + size; 5606 5607 if (!dtrace_vcanload((void *)(uintptr_t)val, 5608 &dp->dtdo_rtype, &mstate, vstate)) 5609 continue; 5610 5611 /* 5612 * If this is a string, we're going to only 5613 * load until we find the zero byte -- after 5614 * which we'll store zero bytes. 5615 */ 5616 if (dp->dtdo_rtype.dtdt_kind == 5617 DIF_TYPE_STRING) { 5618 char c = '\0' + 1; 5619 int intuple = act->dta_intuple; 5620 size_t s; 5621 5622 for (s = 0; s < size; s++) { 5623 if (c != '\0') 5624 c = dtrace_load8(val++); 5625 5626 DTRACE_STORE(uint8_t, tomax, 5627 valoffs++, c); 5628 5629 if (c == '\0' && intuple) 5630 break; 5631 } 5632 5633 continue; 5634 } 5635 5636 while (valoffs < end) { 5637 DTRACE_STORE(uint8_t, tomax, valoffs++, 5638 dtrace_load8(val++)); 5639 } 5640 5641 continue; 5642 } 5643 5644 switch (size) { 5645 case 0: 5646 break; 5647 5648 case sizeof (uint8_t): 5649 DTRACE_STORE(uint8_t, tomax, valoffs, val); 5650 break; 5651 case sizeof (uint16_t): 5652 DTRACE_STORE(uint16_t, tomax, valoffs, val); 5653 break; 5654 case sizeof (uint32_t): 5655 DTRACE_STORE(uint32_t, tomax, valoffs, val); 5656 break; 5657 case sizeof (uint64_t): 5658 DTRACE_STORE(uint64_t, tomax, valoffs, val); 5659 break; 5660 default: 5661 /* 5662 * Any other size should have been returned by 5663 * reference, not by value. 5664 */ 5665 ASSERT(0); 5666 break; 5667 } 5668 } 5669 5670 if (*flags & CPU_DTRACE_DROP) 5671 continue; 5672 5673 if (*flags & CPU_DTRACE_FAULT) { 5674 int ndx; 5675 dtrace_action_t *err; 5676 5677 buf->dtb_errors++; 5678 5679 if (probe->dtpr_id == dtrace_probeid_error) { 5680 /* 5681 * There's nothing we can do -- we had an 5682 * error on the error probe. We bump an 5683 * error counter to at least indicate that 5684 * this condition happened. 5685 */ 5686 dtrace_error(&state->dts_dblerrors); 5687 continue; 5688 } 5689 5690 if (vtime) { 5691 /* 5692 * Before recursing on dtrace_probe(), we 5693 * need to explicitly clear out our start 5694 * time to prevent it from being accumulated 5695 * into t_dtrace_vtime. 5696 */ 5697 curthread->t_dtrace_start = 0; 5698 } 5699 5700 /* 5701 * Iterate over the actions to figure out which action 5702 * we were processing when we experienced the error. 5703 * Note that act points _past_ the faulting action; if 5704 * act is ecb->dte_action, the fault was in the 5705 * predicate, if it's ecb->dte_action->dta_next it's 5706 * in action #1, and so on. 5707 */ 5708 for (err = ecb->dte_action, ndx = 0; 5709 err != act; err = err->dta_next, ndx++) 5710 continue; 5711 5712 dtrace_probe_error(state, ecb->dte_epid, ndx, 5713 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
5714 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 5715 cpu_core[cpuid].cpuc_dtrace_illval); 5716 5717 continue; 5718 } 5719 5720 if (!committed) 5721 buf->dtb_offset = offs + ecb->dte_size; 5722 } 5723 5724 if (vtime) 5725 curthread->t_dtrace_start = dtrace_gethrtime(); 5726 5727 dtrace_interrupt_enable(cookie); 5728 } 5729 5730 /* 5731 * DTrace Probe Hashing Functions 5732 * 5733 * The functions in this section (and indeed, the functions in remaining 5734 * sections) are not _called_ from probe context. (Any exceptions to this are 5735 * marked with a "Note:".) Rather, they are called from elsewhere in the 5736 * DTrace framework to look up probes in, add probes to, and remove probes from 5737 * the DTrace probe hashes. (Each probe is hashed by each element of the 5738 * probe tuple -- allowing for fast lookups, regardless of what was 5739 * specified.) 5740 */ 5741 static uint_t 5742 dtrace_hash_str(char *p) 5743 { 5744 unsigned int g; 5745 uint_t hval = 0; 5746 5747 while (*p) { 5748 hval = (hval << 4) + *p++; 5749 if ((g = (hval & 0xf0000000)) != 0) 5750 hval ^= g >> 24; 5751 hval &= ~g; 5752 } 5753 return (hval); 5754 } 5755 5756 static dtrace_hash_t * 5757 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5758 { 5759 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5760 5761 hash->dth_stroffs = stroffs; 5762 hash->dth_nextoffs = nextoffs; 5763 hash->dth_prevoffs = prevoffs; 5764 5765 hash->dth_size = 1; 5766 hash->dth_mask = hash->dth_size - 1; 5767 5768 hash->dth_tab = kmem_zalloc(hash->dth_size * 5769 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5770 5771 return (hash); 5772 } 5773 5774 static void 5775 dtrace_hash_destroy(dtrace_hash_t *hash) 5776 { 5777 #ifdef DEBUG 5778 int i; 5779 5780 for (i = 0; i < hash->dth_size; i++) 5781 ASSERT(hash->dth_tab[i] == NULL); 5782 #endif 5783 5784 kmem_free(hash->dth_tab, 5785 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5786 kmem_free(hash, sizeof (dtrace_hash_t)); 5787 } 5788 5789 static void 5790 dtrace_hash_resize(dtrace_hash_t *hash) 5791 { 5792 int size = hash->dth_size, i, ndx; 5793 int new_size = hash->dth_size << 1; 5794 int new_mask = new_size - 1; 5795 dtrace_hashbucket_t **new_tab, *bucket, *next; 5796 5797 ASSERT((new_size & new_mask) == 0); 5798 5799 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5800 5801 for (i = 0; i < size; i++) { 5802 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5803 dtrace_probe_t *probe = bucket->dthb_chain; 5804 5805 ASSERT(probe != NULL); 5806 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5807 5808 next = bucket->dthb_next; 5809 bucket->dthb_next = new_tab[ndx]; 5810 new_tab[ndx] = bucket; 5811 } 5812 } 5813 5814 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5815 hash->dth_tab = new_tab; 5816 hash->dth_size = new_size; 5817 hash->dth_mask = new_mask; 5818 } 5819 5820 static void 5821 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5822 { 5823 int hashval = DTRACE_HASHSTR(hash, new); 5824 int ndx = hashval & hash->dth_mask; 5825 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5826 dtrace_probe_t **nextp, **prevp; 5827 5828 for (; bucket != NULL; bucket = bucket->dthb_next) { 5829 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5830 goto add; 5831 } 5832 5833 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5834 dtrace_hash_resize(hash); 5835 dtrace_hash_add(hash, new); 5836 return; 5837 } 5838 5839 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5840 bucket->dthb_next =
hash->dth_tab[ndx]; 5841 hash->dth_tab[ndx] = bucket; 5842 hash->dth_nbuckets++; 5843 5844 add: 5845 nextp = DTRACE_HASHNEXT(hash, new); 5846 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5847 *nextp = bucket->dthb_chain; 5848 5849 if (bucket->dthb_chain != NULL) { 5850 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5851 ASSERT(*prevp == NULL); 5852 *prevp = new; 5853 } 5854 5855 bucket->dthb_chain = new; 5856 bucket->dthb_len++; 5857 } 5858 5859 static dtrace_probe_t * 5860 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5861 { 5862 int hashval = DTRACE_HASHSTR(hash, template); 5863 int ndx = hashval & hash->dth_mask; 5864 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5865 5866 for (; bucket != NULL; bucket = bucket->dthb_next) { 5867 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5868 return (bucket->dthb_chain); 5869 } 5870 5871 return (NULL); 5872 } 5873 5874 static int 5875 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5876 { 5877 int hashval = DTRACE_HASHSTR(hash, template); 5878 int ndx = hashval & hash->dth_mask; 5879 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5880 5881 for (; bucket != NULL; bucket = bucket->dthb_next) { 5882 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5883 return (bucket->dthb_len); 5884 } 5885 5886 return (0); 5887 } 5888 5889 static void 5890 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5891 { 5892 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5893 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5894 5895 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5896 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5897 5898 /* 5899 * Find the bucket that we're removing this probe from. 5900 */ 5901 for (; bucket != NULL; bucket = bucket->dthb_next) { 5902 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5903 break; 5904 } 5905 5906 ASSERT(bucket != NULL); 5907 5908 if (*prevp == NULL) { 5909 if (*nextp == NULL) { 5910 /* 5911 * The removed probe was the only probe on this 5912 * bucket; we need to remove the bucket. 5913 */ 5914 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5915 5916 ASSERT(bucket->dthb_chain == probe); 5917 ASSERT(b != NULL); 5918 5919 if (b == bucket) { 5920 hash->dth_tab[ndx] = bucket->dthb_next; 5921 } else { 5922 while (b->dthb_next != bucket) 5923 b = b->dthb_next; 5924 b->dthb_next = bucket->dthb_next; 5925 } 5926 5927 ASSERT(hash->dth_nbuckets > 0); 5928 hash->dth_nbuckets--; 5929 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5930 return; 5931 } 5932 5933 bucket->dthb_chain = *nextp; 5934 } else { 5935 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5936 } 5937 5938 if (*nextp != NULL) 5939 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5940 } 5941 5942 /* 5943 * DTrace Utility Functions 5944 * 5945 * These are random utility functions that are _not_ called from probe context. 5946 */ 5947 static int 5948 dtrace_badattr(const dtrace_attribute_t *a) 5949 { 5950 return (a->dtat_name > DTRACE_STABILITY_MAX || 5951 a->dtat_data > DTRACE_STABILITY_MAX || 5952 a->dtat_class > DTRACE_CLASS_MAX); 5953 } 5954 5955 /* 5956 * Return a duplicate of a string. If the specified string is NULL, 5957 * this function returns a zero-length string. 5958 */ 5959 static char * 5960 dtrace_strdup(const char *str) 5961 { 5962 char *new = kmem_zalloc((str != NULL ?
strlen(str) : 0) + 1, KM_SLEEP); 5963 5964 if (str != NULL) 5965 (void) strcpy(new, str); 5966 5967 return (new); 5968 } 5969 5970 #define DTRACE_ISALPHA(c) \ 5971 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5972 5973 static int 5974 dtrace_badname(const char *s) 5975 { 5976 char c; 5977 5978 if (s == NULL || (c = *s++) == '\0') 5979 return (0); 5980 5981 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5982 return (1); 5983 5984 while ((c = *s++) != '\0') { 5985 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 5986 c != '-' && c != '_' && c != '.' && c != '`') 5987 return (1); 5988 } 5989 5990 return (0); 5991 } 5992 5993 static void 5994 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 5995 { 5996 uint32_t priv; 5997 5998 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5999 /* 6000 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6001 */ 6002 priv = DTRACE_PRIV_ALL; 6003 } else { 6004 *uidp = crgetuid(cr); 6005 *zoneidp = crgetzoneid(cr); 6006 6007 priv = 0; 6008 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6009 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6010 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6011 priv |= DTRACE_PRIV_USER; 6012 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6013 priv |= DTRACE_PRIV_PROC; 6014 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6015 priv |= DTRACE_PRIV_OWNER; 6016 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6017 priv |= DTRACE_PRIV_ZONEOWNER; 6018 } 6019 6020 *privp = priv; 6021 } 6022 6023 #ifdef DTRACE_ERRDEBUG 6024 static void 6025 dtrace_errdebug(const char *str) 6026 { 6027 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6028 int occupied = 0; 6029 6030 mutex_enter(&dtrace_errlock); 6031 dtrace_errlast = str; 6032 dtrace_errthread = curthread; 6033 6034 while (occupied++ < DTRACE_ERRHASHSZ) { 6035 if (dtrace_errhash[hval].dter_msg == str) { 6036 dtrace_errhash[hval].dter_count++; 6037 goto out; 6038 } 6039 6040 if (dtrace_errhash[hval].dter_msg != NULL) { 6041 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6042 continue; 6043 } 6044 6045 dtrace_errhash[hval].dter_msg = str; 6046 dtrace_errhash[hval].dter_count = 1; 6047 goto out; 6048 } 6049 6050 panic("dtrace: undersized error hash"); 6051 out: 6052 mutex_exit(&dtrace_errlock); 6053 } 6054 #endif 6055 6056 /* 6057 * DTrace Matching Functions 6058 * 6059 * These functions are used to match groups of probes, given some elements of 6060 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6061 */ 6062 static int 6063 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6064 zoneid_t zoneid) 6065 { 6066 if (priv != DTRACE_PRIV_ALL) { 6067 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6068 uint32_t match = priv & ppriv; 6069 6070 /* 6071 * No PRIV_DTRACE_* privileges... 6072 */ 6073 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6074 DTRACE_PRIV_KERNEL)) == 0) 6075 return (0); 6076 6077 /* 6078 * No matching bits, but there were bits to match... 6079 */ 6080 if (match == 0 && ppriv != 0) 6081 return (0); 6082 6083 /* 6084 * Need to have permissions to the process, but don't... 6085 */ 6086 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6087 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6088 return (0); 6089 } 6090 6091 /* 6092 * Need to be in the same zone unless we possess the 6093 * privilege to examine all zones. 
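 * * (DTRACE_PRIV_ZONEOWNER corresponds to the proc_zone privilege, * as derived in dtrace_cred2priv(), above.)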
6094 */ 6095 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6096 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6097 return (0); 6098 } 6099 } 6100 6101 return (1); 6102 } 6103 6104 /* 6105 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6106 * consists of input pattern strings and an ops-vector to evaluate them. 6107 * This function returns >0 for match, 0 for no match, and <0 for error. 6108 */ 6109 static int 6110 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6111 uint32_t priv, uid_t uid, zoneid_t zoneid) 6112 { 6113 dtrace_provider_t *pvp = prp->dtpr_provider; 6114 int rv; 6115 6116 if (pvp->dtpv_defunct) 6117 return (0); 6118 6119 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6120 return (rv); 6121 6122 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6123 return (rv); 6124 6125 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6126 return (rv); 6127 6128 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6129 return (rv); 6130 6131 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6132 return (0); 6133 6134 return (rv); 6135 } 6136 6137 /* 6138 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6139 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6140 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6141 * In addition, all of the recursion cases except for '*' matching have been 6142 * unwound. For '*', we still implement recursive evaluation, but a depth 6143 * counter is maintained and matching is aborted if we recurse too deep. 6144 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6145 */ 6146 static int 6147 dtrace_match_glob(const char *s, const char *p, int depth) 6148 { 6149 const char *olds; 6150 char s1, c; 6151 int gs; 6152 6153 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6154 return (-1); 6155 6156 if (s == NULL) 6157 s = ""; /* treat NULL as empty string */ 6158 6159 top: 6160 olds = s; 6161 s1 = *s++; 6162 6163 if (p == NULL) 6164 return (0); 6165 6166 if ((c = *p++) == '\0') 6167 return (s1 == '\0'); 6168 6169 switch (c) { 6170 case '[': { 6171 int ok = 0, notflag = 0; 6172 char lc = '\0'; 6173 6174 if (s1 == '\0') 6175 return (0); 6176 6177 if (*p == '!') { 6178 notflag = 1; 6179 p++; 6180 } 6181 6182 if ((c = *p++) == '\0') 6183 return (0); 6184 6185 do { 6186 if (c == '-' && lc != '\0' && *p != ']') { 6187 if ((c = *p++) == '\0') 6188 return (0); 6189 if (c == '\\' && (c = *p++) == '\0') 6190 return (0); 6191 6192 if (notflag) { 6193 if (s1 < lc || s1 > c) 6194 ok++; 6195 else 6196 return (0); 6197 } else if (lc <= s1 && s1 <= c) 6198 ok++; 6199 6200 } else if (c == '\\' && (c = *p++) == '\0') 6201 return (0); 6202 6203 lc = c; /* save left-hand 'c' for next iteration */ 6204 6205 if (notflag) { 6206 if (s1 != c) 6207 ok++; 6208 else 6209 return (0); 6210 } else if (s1 == c) 6211 ok++; 6212 6213 if ((c = *p++) == '\0') 6214 return (0); 6215 6216 } while (c != ']'); 6217 6218 if (ok) 6219 goto top; 6220 6221 return (0); 6222 } 6223 6224 case '\\': 6225 if ((c = *p++) == '\0') 6226 return (0); 6227 /*FALLTHRU*/ 6228 6229 default: 6230 if (c != s1) 6231 return (0); 6232 /*FALLTHRU*/ 6233 6234 case '?': 6235 if (s1 != '\0') 6236 goto top; 6237 return (0); 6238 6239 case '*': 6240 while (*p == '*') 6241 p++; /* consecutive *'s are identical to a single one */ 6242 6243 if (*p == '\0') 6244 return (1); 6245 6246 for (s = 
olds; *s != '\0'; s++) { 6247 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 6248 return (gs); 6249 } 6250 6251 return (0); 6252 } 6253 } 6254 6255 /*ARGSUSED*/ 6256 static int 6257 dtrace_match_string(const char *s, const char *p, int depth) 6258 { 6259 return (s != NULL && strcmp(s, p) == 0); 6260 } 6261 6262 /*ARGSUSED*/ 6263 static int 6264 dtrace_match_nul(const char *s, const char *p, int depth) 6265 { 6266 return (1); /* always match the empty pattern */ 6267 } 6268 6269 /*ARGSUSED*/ 6270 static int 6271 dtrace_match_nonzero(const char *s, const char *p, int depth) 6272 { 6273 return (s != NULL && s[0] != '\0'); 6274 } 6275 6276 static int 6277 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 6278 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 6279 { 6280 dtrace_probe_t template, *probe; 6281 dtrace_hash_t *hash = NULL; 6282 int len, best = INT_MAX, nmatched = 0; 6283 dtrace_id_t i; 6284 6285 ASSERT(MUTEX_HELD(&dtrace_lock)); 6286 6287 /* 6288 * If the probe ID is specified in the key, just lookup by ID and 6289 * invoke the match callback once if a matching probe is found. 6290 */ 6291 if (pkp->dtpk_id != DTRACE_IDNONE) { 6292 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 6293 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 6294 (void) (*matched)(probe, arg); 6295 nmatched++; 6296 } 6297 return (nmatched); 6298 } 6299 6300 template.dtpr_mod = (char *)pkp->dtpk_mod; 6301 template.dtpr_func = (char *)pkp->dtpk_func; 6302 template.dtpr_name = (char *)pkp->dtpk_name; 6303 6304 /* 6305 * We want to find the most distinct of the module name, function 6306 * name, and name. So for each one that is not a glob pattern or 6307 * empty string, we perform a lookup in the corresponding hash and 6308 * use the hash table with the fewest collisions to do our search. 6309 */ 6310 if (pkp->dtpk_mmatch == &dtrace_match_string && 6311 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 6312 best = len; 6313 hash = dtrace_bymod; 6314 } 6315 6316 if (pkp->dtpk_fmatch == &dtrace_match_string && 6317 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 6318 best = len; 6319 hash = dtrace_byfunc; 6320 } 6321 6322 if (pkp->dtpk_nmatch == &dtrace_match_string && 6323 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 6324 best = len; 6325 hash = dtrace_byname; 6326 } 6327 6328 /* 6329 * If we did not select a hash table, iterate over every probe and 6330 * invoke our callback for each one that matches our input probe key. 6331 */ 6332 if (hash == NULL) { 6333 for (i = 0; i < dtrace_nprobes; i++) { 6334 if ((probe = dtrace_probes[i]) == NULL || 6335 dtrace_match_probe(probe, pkp, priv, uid, 6336 zoneid) <= 0) 6337 continue; 6338 6339 nmatched++; 6340 6341 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6342 break; 6343 } 6344 6345 return (nmatched); 6346 } 6347 6348 /* 6349 * If we selected a hash table, iterate over each probe of the same key 6350 * name and invoke the callback for every probe that matches the other 6351 * attributes of our input probe key. 
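 *
 * For example, for the description syscall::read:entry, the module
 * component is empty (so dtrace_bymod is never consulted), while both
 * "read" and "entry" are exact strings; whichever of dtrace_byfunc
 * and dtrace_byname reported fewer collisions above drives the walk
 * below, and dtrace_match_probe() filters on the remaining
 * components.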
6352 */ 6353 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6354 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6355 6356 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6357 continue; 6358 6359 nmatched++; 6360 6361 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6362 break; 6363 } 6364 6365 return (nmatched); 6366 } 6367 6368 /* 6369 * Return the function pointer dtrace_match_probe() should use to compare the 6370 * specified pattern with a string. For NULL or empty patterns, we select 6371 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6372 * For non-empty non-glob strings, we use dtrace_match_string(). 6373 */ 6374 static dtrace_probekey_f * 6375 dtrace_probekey_func(const char *p) 6376 { 6377 char c; 6378 6379 if (p == NULL || *p == '\0') 6380 return (&dtrace_match_nul); 6381 6382 while ((c = *p++) != '\0') { 6383 if (c == '[' || c == '?' || c == '*' || c == '\\') 6384 return (&dtrace_match_glob); 6385 } 6386 6387 return (&dtrace_match_string); 6388 } 6389 6390 /* 6391 * Build a probe comparison key for use with dtrace_match_probe() from the 6392 * given probe description. By convention, a null key only matches anchored 6393 * probes: if each field is the empty string, reset dtpk_fmatch to 6394 * dtrace_match_nonzero(). 6395 */ 6396 static void 6397 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6398 { 6399 pkp->dtpk_prov = pdp->dtpd_provider; 6400 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6401 6402 pkp->dtpk_mod = pdp->dtpd_mod; 6403 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6404 6405 pkp->dtpk_func = pdp->dtpd_func; 6406 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6407 6408 pkp->dtpk_name = pdp->dtpd_name; 6409 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6410 6411 pkp->dtpk_id = pdp->dtpd_id; 6412 6413 if (pkp->dtpk_id == DTRACE_IDNONE && 6414 pkp->dtpk_pmatch == &dtrace_match_nul && 6415 pkp->dtpk_mmatch == &dtrace_match_nul && 6416 pkp->dtpk_fmatch == &dtrace_match_nul && 6417 pkp->dtpk_nmatch == &dtrace_match_nul) 6418 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6419 } 6420 6421 /* 6422 * DTrace Provider-to-Framework API Functions 6423 * 6424 * These functions implement much of the Provider-to-Framework API, as 6425 * described in <sys/dtrace.h>. The parts of the API not in this section are 6426 * the functions in the API for probe management (found below), and 6427 * dtrace_probe() itself (found above). 6428 */ 6429 6430 /* 6431 * Register the calling provider with the DTrace framework. This should 6432 * generally be called by DTrace providers in their attach(9E) entry point. 6433 */ 6434 int 6435 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6436 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6437 { 6438 dtrace_provider_t *provider; 6439 6440 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6441 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6442 "arguments", name ?
name : "<NULL>"); 6443 return (EINVAL); 6444 } 6445 6446 if (name[0] == '\0' || dtrace_badname(name)) { 6447 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6448 "provider name", name); 6449 return (EINVAL); 6450 } 6451 6452 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6453 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6454 pops->dtps_destroy == NULL || 6455 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6456 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6457 "provider ops", name); 6458 return (EINVAL); 6459 } 6460 6461 if (dtrace_badattr(&pap->dtpa_provider) || 6462 dtrace_badattr(&pap->dtpa_mod) || 6463 dtrace_badattr(&pap->dtpa_func) || 6464 dtrace_badattr(&pap->dtpa_name) || 6465 dtrace_badattr(&pap->dtpa_args)) { 6466 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6467 "provider attributes", name); 6468 return (EINVAL); 6469 } 6470 6471 if (priv & ~DTRACE_PRIV_ALL) { 6472 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6473 "privilege attributes", name); 6474 return (EINVAL); 6475 } 6476 6477 if ((priv & DTRACE_PRIV_KERNEL) && 6478 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 6479 pops->dtps_usermode == NULL) { 6480 cmn_err(CE_WARN, "failed to register provider '%s': need " 6481 "dtps_usermode() op for given privilege attributes", name); 6482 return (EINVAL); 6483 } 6484 6485 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 6486 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6487 (void) strcpy(provider->dtpv_name, name); 6488 6489 provider->dtpv_attr = *pap; 6490 provider->dtpv_priv.dtpp_flags = priv; 6491 if (cr != NULL) { 6492 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 6493 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 6494 } 6495 provider->dtpv_pops = *pops; 6496 6497 if (pops->dtps_provide == NULL) { 6498 ASSERT(pops->dtps_provide_module != NULL); 6499 provider->dtpv_pops.dtps_provide = 6500 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 6501 } 6502 6503 if (pops->dtps_provide_module == NULL) { 6504 ASSERT(pops->dtps_provide != NULL); 6505 provider->dtpv_pops.dtps_provide_module = 6506 (void (*)(void *, struct modctl *))dtrace_nullop; 6507 } 6508 6509 if (pops->dtps_suspend == NULL) { 6510 ASSERT(pops->dtps_resume == NULL); 6511 provider->dtpv_pops.dtps_suspend = 6512 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6513 provider->dtpv_pops.dtps_resume = 6514 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6515 } 6516 6517 provider->dtpv_arg = arg; 6518 *idp = (dtrace_provider_id_t)provider; 6519 6520 if (pops == &dtrace_provider_ops) { 6521 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6522 ASSERT(MUTEX_HELD(&dtrace_lock)); 6523 ASSERT(dtrace_anon.dta_enabling == NULL); 6524 6525 /* 6526 * We make sure that the DTrace provider is at the head of 6527 * the provider chain. 6528 */ 6529 provider->dtpv_next = dtrace_provider; 6530 dtrace_provider = provider; 6531 return (0); 6532 } 6533 6534 mutex_enter(&dtrace_provider_lock); 6535 mutex_enter(&dtrace_lock); 6536 6537 /* 6538 * If there is at least one provider registered, we'll add this 6539 * provider after the first provider. 
6540 */ 6541 if (dtrace_provider != NULL) { 6542 provider->dtpv_next = dtrace_provider->dtpv_next; 6543 dtrace_provider->dtpv_next = provider; 6544 } else { 6545 dtrace_provider = provider; 6546 } 6547 6548 if (dtrace_retained != NULL) { 6549 dtrace_enabling_provide(provider); 6550 6551 /* 6552 * Now we need to call dtrace_enabling_matchall() -- which 6553 * will acquire cpu_lock and dtrace_lock. We therefore need 6554 * to drop all of our locks before calling into it... 6555 */ 6556 mutex_exit(&dtrace_lock); 6557 mutex_exit(&dtrace_provider_lock); 6558 dtrace_enabling_matchall(); 6559 6560 return (0); 6561 } 6562 6563 mutex_exit(&dtrace_lock); 6564 mutex_exit(&dtrace_provider_lock); 6565 6566 return (0); 6567 } 6568 6569 /* 6570 * Unregister the specified provider from the DTrace framework. This should 6571 * generally be called by DTrace providers in their detach(9E) entry point. 6572 */ 6573 int 6574 dtrace_unregister(dtrace_provider_id_t id) 6575 { 6576 dtrace_provider_t *old = (dtrace_provider_t *)id; 6577 dtrace_provider_t *prev = NULL; 6578 int i, self = 0; 6579 dtrace_probe_t *probe, *first = NULL; 6580 6581 if (old->dtpv_pops.dtps_enable == 6582 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 6583 /* 6584 * If DTrace itself is the provider, we're called with locks 6585 * already held. 6586 */ 6587 ASSERT(old == dtrace_provider); 6588 ASSERT(dtrace_devi != NULL); 6589 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6590 ASSERT(MUTEX_HELD(&dtrace_lock)); 6591 self = 1; 6592 6593 if (dtrace_provider->dtpv_next != NULL) { 6594 /* 6595 * There's another provider here; return failure. 6596 */ 6597 return (EBUSY); 6598 } 6599 } else { 6600 mutex_enter(&dtrace_provider_lock); 6601 mutex_enter(&mod_lock); 6602 mutex_enter(&dtrace_lock); 6603 } 6604 6605 /* 6606 * If anyone has /dev/dtrace open, or if there are anonymous enabled 6607 * probes, we refuse to let providers slither away, unless this 6608 * provider has already been explicitly invalidated. 6609 */ 6610 if (!old->dtpv_defunct && 6611 (dtrace_opens || (dtrace_anon.dta_state != NULL && 6612 dtrace_anon.dta_state->dts_necbs > 0))) { 6613 if (!self) { 6614 mutex_exit(&dtrace_lock); 6615 mutex_exit(&mod_lock); 6616 mutex_exit(&dtrace_provider_lock); 6617 } 6618 return (EBUSY); 6619 } 6620 6621 /* 6622 * Attempt to destroy the probes associated with this provider. 6623 */ 6624 for (i = 0; i < dtrace_nprobes; i++) { 6625 if ((probe = dtrace_probes[i]) == NULL) 6626 continue; 6627 6628 if (probe->dtpr_provider != old) 6629 continue; 6630 6631 if (probe->dtpr_ecb == NULL) 6632 continue; 6633 6634 /* 6635 * We have at least one ECB; we can't remove this provider. 6636 */ 6637 if (!self) { 6638 mutex_exit(&dtrace_lock); 6639 mutex_exit(&mod_lock); 6640 mutex_exit(&dtrace_provider_lock); 6641 } 6642 return (EBUSY); 6643 } 6644 6645 /* 6646 * All of the probes for this provider are disabled; we can safely 6647 * remove all of them from their hash chains and from the probe array. 
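 * Each removed probe is chained onto a local list through its
 * dtpr_nextmod link so that it can be destroyed and freed only after
 * the dtrace_sync() below, once no CPU can still be referencing it.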
6648 */ 6649 for (i = 0; i < dtrace_nprobes; i++) { 6650 if ((probe = dtrace_probes[i]) == NULL) 6651 continue; 6652 6653 if (probe->dtpr_provider != old) 6654 continue; 6655 6656 dtrace_probes[i] = NULL; 6657 6658 dtrace_hash_remove(dtrace_bymod, probe); 6659 dtrace_hash_remove(dtrace_byfunc, probe); 6660 dtrace_hash_remove(dtrace_byname, probe); 6661 6662 if (first == NULL) { 6663 first = probe; 6664 probe->dtpr_nextmod = NULL; 6665 } else { 6666 probe->dtpr_nextmod = first; 6667 first = probe; 6668 } 6669 } 6670 6671 /* 6672 * The provider's probes have been removed from the hash chains and 6673 * from the probe array. Now issue a dtrace_sync() to be sure that 6674 * everyone has cleared out from any probe array processing. 6675 */ 6676 dtrace_sync(); 6677 6678 for (probe = first; probe != NULL; probe = first) { 6679 first = probe->dtpr_nextmod; 6680 6681 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6682 probe->dtpr_arg); 6683 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6684 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6685 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6686 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6687 kmem_free(probe, sizeof (dtrace_probe_t)); 6688 } 6689 6690 if ((prev = dtrace_provider) == old) { 6691 ASSERT(self || dtrace_devi == NULL); 6692 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6693 dtrace_provider = old->dtpv_next; 6694 } else { 6695 while (prev != NULL && prev->dtpv_next != old) 6696 prev = prev->dtpv_next; 6697 6698 if (prev == NULL) { 6699 panic("attempt to unregister non-existent " 6700 "dtrace provider %p\n", (void *)id); 6701 } 6702 6703 prev->dtpv_next = old->dtpv_next; 6704 } 6705 6706 if (!self) { 6707 mutex_exit(&dtrace_lock); 6708 mutex_exit(&mod_lock); 6709 mutex_exit(&dtrace_provider_lock); 6710 } 6711 6712 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6713 kmem_free(old, sizeof (dtrace_provider_t)); 6714 6715 return (0); 6716 } 6717 6718 /* 6719 * Invalidate the specified provider. All subsequent probe lookups for the 6720 * specified provider will fail, but its probes will not be removed. 6721 */ 6722 void 6723 dtrace_invalidate(dtrace_provider_id_t id) 6724 { 6725 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6726 6727 ASSERT(pvp->dtpv_pops.dtps_enable != 6728 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6729 6730 mutex_enter(&dtrace_provider_lock); 6731 mutex_enter(&dtrace_lock); 6732 6733 pvp->dtpv_defunct = 1; 6734 6735 mutex_exit(&dtrace_lock); 6736 mutex_exit(&dtrace_provider_lock); 6737 } 6738 6739 /* 6740 * Indicate whether or not DTrace has attached. 6741 */ 6742 int 6743 dtrace_attached(void) 6744 { 6745 /* 6746 * dtrace_provider will be non-NULL iff the DTrace driver has 6747 * attached. (It's non-NULL because DTrace is always itself a 6748 * provider.) 6749 */ 6750 return (dtrace_provider != NULL); 6751 } 6752 6753 /* 6754 * Remove all the unenabled probes for the given provider. This function is 6755 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6756 * -- just as many of its associated probes as it can. 6757 */ 6758 int 6759 dtrace_condense(dtrace_provider_id_t id) 6760 { 6761 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6762 int i; 6763 dtrace_probe_t *probe; 6764 6765 /* 6766 * Make sure this isn't the dtrace provider itself. 
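 * (As in dtrace_unregister(), the dtrace provider itself is
 * identified by its dtps_enable op being dtrace_nullop.)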
6767 */ 6768 ASSERT(prov->dtpv_pops.dtps_enable != 6769 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6770 6771 mutex_enter(&dtrace_provider_lock); 6772 mutex_enter(&dtrace_lock); 6773 6774 /* 6775 * Attempt to destroy the probes associated with this provider. 6776 */ 6777 for (i = 0; i < dtrace_nprobes; i++) { 6778 if ((probe = dtrace_probes[i]) == NULL) 6779 continue; 6780 6781 if (probe->dtpr_provider != prov) 6782 continue; 6783 6784 if (probe->dtpr_ecb != NULL) 6785 continue; 6786 6787 dtrace_probes[i] = NULL; 6788 6789 dtrace_hash_remove(dtrace_bymod, probe); 6790 dtrace_hash_remove(dtrace_byfunc, probe); 6791 dtrace_hash_remove(dtrace_byname, probe); 6792 6793 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 6794 probe->dtpr_arg); 6795 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6796 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6797 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6798 kmem_free(probe, sizeof (dtrace_probe_t)); 6799 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 6800 } 6801 6802 mutex_exit(&dtrace_lock); 6803 mutex_exit(&dtrace_provider_lock); 6804 6805 return (0); 6806 } 6807 6808 /* 6809 * DTrace Probe Management Functions 6810 * 6811 * The functions in this section perform the DTrace probe management, 6812 * including functions to create probes, look-up probes, and call into the 6813 * providers to request that probes be provided. Some of these functions are 6814 * in the Provider-to-Framework API; these functions can be identified by the 6815 * fact that they are not declared "static". 6816 */ 6817 6818 /* 6819 * Create a probe with the specified module name, function name, and name. 6820 */ 6821 dtrace_id_t 6822 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6823 const char *func, const char *name, int aframes, void *arg) 6824 { 6825 dtrace_probe_t *probe, **probes; 6826 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6827 dtrace_id_t id; 6828 6829 if (provider == dtrace_provider) { 6830 ASSERT(MUTEX_HELD(&dtrace_lock)); 6831 } else { 6832 mutex_enter(&dtrace_lock); 6833 } 6834 6835 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6836 VM_BESTFIT | VM_SLEEP); 6837 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6838 6839 probe->dtpr_id = id; 6840 probe->dtpr_gen = dtrace_probegen++; 6841 probe->dtpr_mod = dtrace_strdup(mod); 6842 probe->dtpr_func = dtrace_strdup(func); 6843 probe->dtpr_name = dtrace_strdup(name); 6844 probe->dtpr_arg = arg; 6845 probe->dtpr_aframes = aframes; 6846 probe->dtpr_provider = provider; 6847 6848 dtrace_hash_add(dtrace_bymod, probe); 6849 dtrace_hash_add(dtrace_byfunc, probe); 6850 dtrace_hash_add(dtrace_byname, probe); 6851 6852 if (id - 1 >= dtrace_nprobes) { 6853 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6854 size_t nsize = osize << 1; 6855 6856 if (nsize == 0) { 6857 ASSERT(osize == 0); 6858 ASSERT(dtrace_probes == NULL); 6859 nsize = sizeof (dtrace_probe_t *); 6860 } 6861 6862 probes = kmem_zalloc(nsize, KM_SLEEP); 6863 6864 if (dtrace_probes == NULL) { 6865 ASSERT(osize == 0); 6866 dtrace_probes = probes; 6867 dtrace_nprobes = 1; 6868 } else { 6869 dtrace_probe_t **oprobes = dtrace_probes; 6870 6871 bcopy(oprobes, probes, osize); 6872 dtrace_membar_producer(); 6873 dtrace_probes = probes; 6874 6875 dtrace_sync(); 6876 6877 /* 6878 * All CPUs are now seeing the new probes array; we can 6879 * safely free the old array. 
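 * (The dtrace_membar_producer() above guarantees that the copied
 * contents are visible before the new array pointer is; the
 * dtrace_sync() guarantees that no CPU is still consuming the old
 * pointer from probe context.)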
6880 */ 6881 kmem_free(oprobes, osize); 6882 dtrace_nprobes <<= 1; 6883 } 6884 6885 ASSERT(id - 1 < dtrace_nprobes); 6886 } 6887 6888 ASSERT(dtrace_probes[id - 1] == NULL); 6889 dtrace_probes[id - 1] = probe; 6890 6891 if (provider != dtrace_provider) 6892 mutex_exit(&dtrace_lock); 6893 6894 return (id); 6895 } 6896 6897 static dtrace_probe_t * 6898 dtrace_probe_lookup_id(dtrace_id_t id) 6899 { 6900 ASSERT(MUTEX_HELD(&dtrace_lock)); 6901 6902 if (id == 0 || id > dtrace_nprobes) 6903 return (NULL); 6904 6905 return (dtrace_probes[id - 1]); 6906 } 6907 6908 static int 6909 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 6910 { 6911 *((dtrace_id_t *)arg) = probe->dtpr_id; 6912 6913 return (DTRACE_MATCH_DONE); 6914 } 6915 6916 /* 6917 * Look up a probe based on provider and one or more of module name, function 6918 * name and probe name. 6919 */ 6920 dtrace_id_t 6921 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 6922 const char *func, const char *name) 6923 { 6924 dtrace_probekey_t pkey; 6925 dtrace_id_t id; 6926 int match; 6927 6928 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 6929 pkey.dtpk_pmatch = &dtrace_match_string; 6930 pkey.dtpk_mod = mod; 6931 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 6932 pkey.dtpk_func = func; 6933 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 6934 pkey.dtpk_name = name; 6935 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 6936 pkey.dtpk_id = DTRACE_IDNONE; 6937 6938 mutex_enter(&dtrace_lock); 6939 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 6940 dtrace_probe_lookup_match, &id); 6941 mutex_exit(&dtrace_lock); 6942 6943 ASSERT(match == 1 || match == 0); 6944 return (match ? id : 0); 6945 } 6946 6947 /* 6948 * Returns the probe argument associated with the specified probe. 6949 */ 6950 void * 6951 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 6952 { 6953 dtrace_probe_t *probe; 6954 void *rval = NULL; 6955 6956 mutex_enter(&dtrace_lock); 6957 6958 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 6959 probe->dtpr_provider == (dtrace_provider_t *)id) 6960 rval = probe->dtpr_arg; 6961 6962 mutex_exit(&dtrace_lock); 6963 6964 return (rval); 6965 } 6966 6967 /* 6968 * Copy a probe into a probe description. 6969 */ 6970 static void 6971 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 6972 { 6973 bzero(pdp, sizeof (dtrace_probedesc_t)); 6974 pdp->dtpd_id = prp->dtpr_id; 6975 6976 (void) strncpy(pdp->dtpd_provider, 6977 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 6978 6979 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 6980 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 6981 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 6982 } 6983 6984 /* 6985 * Called to indicate that a probe -- or probes -- should be provided by a 6986 * specified provider. If the specified description is NULL, the provider will 6987 * be told to provide all of its probes. (This is done whenever a new 6988 * consumer comes along, or whenever a retained enabling is to be matched.) If 6989 * the specified description is non-NULL, the provider is given the 6990 * opportunity to dynamically provide the specified probe, allowing providers 6991 * to support the creation of probes on-the-fly. (So-called _autocreated_ 6992 * probes.)
If the provider is NULL, the operations will be applied to all 6993 * providers; if the provider is non-NULL the operations will only be applied 6994 * to the specified provider. The dtrace_provider_lock must be held, and the 6995 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6996 * will need to grab the dtrace_lock when it reenters the framework through 6997 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6998 */ 6999 static void 7000 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7001 { 7002 struct modctl *ctl; 7003 int all = 0; 7004 7005 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7006 7007 if (prv == NULL) { 7008 all = 1; 7009 prv = dtrace_provider; 7010 } 7011 7012 do { 7013 /* 7014 * First, call the blanket provide operation. 7015 */ 7016 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7017 7018 /* 7019 * Now call the per-module provide operation. We will grab 7020 * mod_lock to prevent the list from being modified. Note 7021 * that this also prevents the mod_busy bits from changing. 7022 * (mod_busy can only be changed with mod_lock held.) 7023 */ 7024 mutex_enter(&mod_lock); 7025 7026 ctl = &modules; 7027 do { 7028 if (ctl->mod_busy || ctl->mod_mp == NULL) 7029 continue; 7030 7031 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7032 7033 } while ((ctl = ctl->mod_next) != &modules); 7034 7035 mutex_exit(&mod_lock); 7036 } while (all && (prv = prv->dtpv_next) != NULL); 7037 } 7038 7039 /* 7040 * Iterate over each probe, and call the Framework-to-Provider API function 7041 * denoted by offs. 7042 */ 7043 static void 7044 dtrace_probe_foreach(uintptr_t offs) 7045 { 7046 dtrace_provider_t *prov; 7047 void (*func)(void *, dtrace_id_t, void *); 7048 dtrace_probe_t *probe; 7049 dtrace_icookie_t cookie; 7050 int i; 7051 7052 /* 7053 * We disable interrupts to walk through the probe array. This is 7054 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7055 * won't see stale data. 7056 */ 7057 cookie = dtrace_interrupt_disable(); 7058 7059 for (i = 0; i < dtrace_nprobes; i++) { 7060 if ((probe = dtrace_probes[i]) == NULL) 7061 continue; 7062 7063 if (probe->dtpr_ecb == NULL) { 7064 /* 7065 * This probe isn't enabled -- don't call the function. 7066 */ 7067 continue; 7068 } 7069 7070 prov = probe->dtpr_provider; 7071 func = *((void(**)(void *, dtrace_id_t, void *)) 7072 ((uintptr_t)&prov->dtpv_pops + offs)); 7073 7074 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7075 } 7076 7077 dtrace_interrupt_enable(cookie); 7078 } 7079 7080 static int 7081 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7082 { 7083 dtrace_probekey_t pkey; 7084 uint32_t priv; 7085 uid_t uid; 7086 zoneid_t zoneid; 7087 7088 ASSERT(MUTEX_HELD(&dtrace_lock)); 7089 dtrace_ecb_create_cache = NULL; 7090 7091 if (desc == NULL) { 7092 /* 7093 * If we're passed a NULL description, we're being asked to 7094 * create an ECB with a NULL probe. 
7095 */ 7096 (void) dtrace_ecb_create_enable(NULL, enab); 7097 return (0); 7098 } 7099 7100 dtrace_probekey(desc, &pkey); 7101 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7102 &priv, &uid, &zoneid); 7103 7104 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7105 enab)); 7106 } 7107 7108 /* 7109 * DTrace Helper Provider Functions 7110 */ 7111 static void 7112 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7113 { 7114 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7115 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7116 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7117 } 7118 7119 static void 7120 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7121 const dof_provider_t *dofprov, char *strtab) 7122 { 7123 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7124 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7125 dofprov->dofpv_provattr); 7126 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7127 dofprov->dofpv_modattr); 7128 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7129 dofprov->dofpv_funcattr); 7130 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7131 dofprov->dofpv_nameattr); 7132 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7133 dofprov->dofpv_argsattr); 7134 } 7135 7136 static void 7137 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7138 { 7139 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7140 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7141 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7142 dof_provider_t *provider; 7143 dof_probe_t *probe; 7144 uint32_t *off, *enoff; 7145 uint8_t *arg; 7146 char *strtab; 7147 uint_t i, nprobes; 7148 dtrace_helper_provdesc_t dhpv; 7149 dtrace_helper_probedesc_t dhpb; 7150 dtrace_meta_t *meta = dtrace_meta_pid; 7151 dtrace_mops_t *mops = &meta->dtm_mops; 7152 void *parg; 7153 7154 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7155 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7156 provider->dofpv_strtab * dof->dofh_secsize); 7157 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7158 provider->dofpv_probes * dof->dofh_secsize); 7159 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7160 provider->dofpv_prargs * dof->dofh_secsize); 7161 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7162 provider->dofpv_proffs * dof->dofh_secsize); 7163 7164 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7165 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7166 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7167 enoff = NULL; 7168 7169 /* 7170 * See dtrace_helper_provider_validate(). 7171 */ 7172 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7173 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7174 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7175 provider->dofpv_prenoffs * dof->dofh_secsize); 7176 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 7177 } 7178 7179 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 7180 7181 /* 7182 * Create the provider. 7183 */ 7184 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7185 7186 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 7187 return; 7188 7189 meta->dtm_count++; 7190 7191 /* 7192 * Create the probes. 
7193 */ 7194 for (i = 0; i < nprobes; i++) { 7195 probe = (dof_probe_t *)(uintptr_t)(daddr + 7196 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 7197 7198 dhpb.dthpb_mod = dhp->dofhp_mod; 7199 dhpb.dthpb_func = strtab + probe->dofpr_func; 7200 dhpb.dthpb_name = strtab + probe->dofpr_name; 7201 dhpb.dthpb_base = probe->dofpr_addr; 7202 dhpb.dthpb_offs = off + probe->dofpr_offidx; 7203 dhpb.dthpb_noffs = probe->dofpr_noffs; 7204 if (enoff != NULL) { 7205 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 7206 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 7207 } else { 7208 dhpb.dthpb_enoffs = NULL; 7209 dhpb.dthpb_nenoffs = 0; 7210 } 7211 dhpb.dthpb_args = arg + probe->dofpr_argidx; 7212 dhpb.dthpb_nargc = probe->dofpr_nargc; 7213 dhpb.dthpb_xargc = probe->dofpr_xargc; 7214 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 7215 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 7216 7217 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 7218 } 7219 } 7220 7221 static void 7222 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 7223 { 7224 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7225 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7226 int i; 7227 7228 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7229 7230 for (i = 0; i < dof->dofh_secnum; i++) { 7231 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7232 dof->dofh_secoff + i * dof->dofh_secsize); 7233 7234 if (sec->dofs_type != DOF_SECT_PROVIDER) 7235 continue; 7236 7237 dtrace_helper_provide_one(dhp, sec, pid); 7238 } 7239 7240 /* 7241 * We may have just created probes, so we must now rematch against 7242 * any retained enablings. Note that this call will acquire both 7243 * cpu_lock and dtrace_lock; the fact that we are holding 7244 * dtrace_meta_lock now is what defines the ordering with respect to 7245 * these three locks. 7246 */ 7247 dtrace_enabling_matchall(); 7248 } 7249 7250 static void 7251 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7252 { 7253 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7254 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7255 dof_sec_t *str_sec; 7256 dof_provider_t *provider; 7257 char *strtab; 7258 dtrace_helper_provdesc_t dhpv; 7259 dtrace_meta_t *meta = dtrace_meta_pid; 7260 dtrace_mops_t *mops = &meta->dtm_mops; 7261 7262 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7263 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7264 provider->dofpv_strtab * dof->dofh_secsize); 7265 7266 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7267 7268 /* 7269 * Create the provider. 7270 */ 7271 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7272 7273 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 7274 7275 meta->dtm_count--; 7276 } 7277 7278 static void 7279 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 7280 { 7281 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7282 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7283 int i; 7284 7285 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7286 7287 for (i = 0; i < dof->dofh_secnum; i++) { 7288 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7289 dof->dofh_secoff + i * dof->dofh_secsize); 7290 7291 if (sec->dofs_type != DOF_SECT_PROVIDER) 7292 continue; 7293 7294 dtrace_helper_provider_remove_one(dhp, sec, pid); 7295 } 7296 } 7297 7298 /* 7299 * DTrace Meta Provider-to-Framework API Functions 7300 * 7301 * These functions implement the Meta Provider-to-Framework API, as described 7302 * in <sys/dtrace.h>. 
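 *
 * As a sketch of the expected usage (the "foo" names here are
 * hypothetical; the three dtms_* ops shown are the required ones), a
 * user-land meta-provider registers itself from its attach routine:
 *
 *	static dtrace_mops_t foo_mops = {
 *		foo_create_probe,	(dtms_create_probe)
 *		foo_provide_pid,	(dtms_provide_pid)
 *		foo_remove_pid		(dtms_remove_pid)
 *	};
 *
 *	dtrace_meta_provider_id_t foo_id;
 *
 *	if (dtrace_meta_register("foo", &foo_mops, NULL, &foo_id) != 0)
 *		(fail the attach)
 *
 * and calls dtrace_meta_unregister(foo_id) from its detach routine.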
7303 */ 7304 int 7305 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 7306 dtrace_meta_provider_id_t *idp) 7307 { 7308 dtrace_meta_t *meta; 7309 dtrace_helpers_t *help, *next; 7310 int i; 7311 7312 *idp = DTRACE_METAPROVNONE; 7313 7314 /* 7315 * We strictly don't need the name, but we hold onto it for 7316 * debuggability. All hail error queues! 7317 */ 7318 if (name == NULL) { 7319 cmn_err(CE_WARN, "failed to register meta-provider: " 7320 "invalid name"); 7321 return (EINVAL); 7322 } 7323 7324 if (mops == NULL || 7325 mops->dtms_create_probe == NULL || 7326 mops->dtms_provide_pid == NULL || 7327 mops->dtms_remove_pid == NULL) { 7328 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7329 "invalid ops", name); 7330 return (EINVAL); 7331 } 7332 7333 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 7334 meta->dtm_mops = *mops; 7335 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7336 (void) strcpy(meta->dtm_name, name); 7337 meta->dtm_arg = arg; 7338 7339 mutex_enter(&dtrace_meta_lock); 7340 mutex_enter(&dtrace_lock); 7341 7342 if (dtrace_meta_pid != NULL) { 7343 mutex_exit(&dtrace_lock); 7344 mutex_exit(&dtrace_meta_lock); 7345 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7346 "user-land meta-provider exists", name); 7347 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 7348 kmem_free(meta, sizeof (dtrace_meta_t)); 7349 return (EINVAL); 7350 } 7351 7352 dtrace_meta_pid = meta; 7353 *idp = (dtrace_meta_provider_id_t)meta; 7354 7355 /* 7356 * If there are providers and probes ready to go, pass them 7357 * off to the new meta provider now. 7358 */ 7359 7360 help = dtrace_deferred_pid; 7361 dtrace_deferred_pid = NULL; 7362 7363 mutex_exit(&dtrace_lock); 7364 7365 while (help != NULL) { 7366 for (i = 0; i < help->dthps_nprovs; i++) { 7367 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 7368 help->dthps_pid); 7369 } 7370 7371 next = help->dthps_next; 7372 help->dthps_next = NULL; 7373 help->dthps_prev = NULL; 7374 help->dthps_deferred = 0; 7375 help = next; 7376 } 7377 7378 mutex_exit(&dtrace_meta_lock); 7379 7380 return (0); 7381 } 7382 7383 int 7384 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 7385 { 7386 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 7387 7388 mutex_enter(&dtrace_meta_lock); 7389 mutex_enter(&dtrace_lock); 7390 7391 if (old == dtrace_meta_pid) { 7392 pp = &dtrace_meta_pid; 7393 } else { 7394 panic("attempt to unregister non-existent " 7395 "dtrace meta-provider %p\n", (void *)old); 7396 } 7397 7398 if (old->dtm_count != 0) { 7399 mutex_exit(&dtrace_lock); 7400 mutex_exit(&dtrace_meta_lock); 7401 return (EBUSY); 7402 } 7403 7404 *pp = NULL; 7405 7406 mutex_exit(&dtrace_lock); 7407 mutex_exit(&dtrace_meta_lock); 7408 7409 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 7410 kmem_free(old, sizeof (dtrace_meta_t)); 7411 7412 return (0); 7413 } 7414 7415 7416 /* 7417 * DTrace DIF Object Functions 7418 */ 7419 static int 7420 dtrace_difo_err(uint_t pc, const char *format, ...) 7421 { 7422 if (dtrace_err_verbose) { 7423 va_list alist; 7424 7425 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 7426 va_start(alist, format); 7427 (void) vuprintf(format, alist); 7428 va_end(alist); 7429 } 7430 7431 #ifdef DTRACE_ERRDEBUG 7432 dtrace_errdebug(format); 7433 #endif 7434 return (1); 7435 } 7436 7437 /* 7438 * Validate a DTrace DIF object by checking the IR instructions. The following 7439 * rules are currently enforced by dtrace_difo_validate(): 7440 * 7441 * 1.
Each instruction must have a valid opcode 7442 * 2. Each register, string, variable, or subroutine reference must be valid 7443 * 3. No instruction can modify register %r0 (must be zero) 7444 * 4. All instruction reserved bits must be set to zero 7445 * 5. The last instruction must be a "ret" instruction 7446 * 6. All branch targets must reference a valid instruction _after_ the branch 7447 */ 7448 static int 7449 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7450 cred_t *cr) 7451 { 7452 int err = 0, i; 7453 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 7454 int kcheckload; 7455 uint_t pc; 7456 7457 kcheckload = cr == NULL || 7458 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 7459 7460 dp->dtdo_destructive = 0; 7461 7462 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 7463 dif_instr_t instr = dp->dtdo_buf[pc]; 7464 7465 uint_t r1 = DIF_INSTR_R1(instr); 7466 uint_t r2 = DIF_INSTR_R2(instr); 7467 uint_t rd = DIF_INSTR_RD(instr); 7468 uint_t rs = DIF_INSTR_RS(instr); 7469 uint_t label = DIF_INSTR_LABEL(instr); 7470 uint_t v = DIF_INSTR_VAR(instr); 7471 uint_t subr = DIF_INSTR_SUBR(instr); 7472 uint_t type = DIF_INSTR_TYPE(instr); 7473 uint_t op = DIF_INSTR_OP(instr); 7474 7475 switch (op) { 7476 case DIF_OP_OR: 7477 case DIF_OP_XOR: 7478 case DIF_OP_AND: 7479 case DIF_OP_SLL: 7480 case DIF_OP_SRL: 7481 case DIF_OP_SRA: 7482 case DIF_OP_SUB: 7483 case DIF_OP_ADD: 7484 case DIF_OP_MUL: 7485 case DIF_OP_SDIV: 7486 case DIF_OP_UDIV: 7487 case DIF_OP_SREM: 7488 case DIF_OP_UREM: 7489 case DIF_OP_COPYS: 7490 if (r1 >= nregs) 7491 err += efunc(pc, "invalid register %u\n", r1); 7492 if (r2 >= nregs) 7493 err += efunc(pc, "invalid register %u\n", r2); 7494 if (rd >= nregs) 7495 err += efunc(pc, "invalid register %u\n", rd); 7496 if (rd == 0) 7497 err += efunc(pc, "cannot write to %r0\n"); 7498 break; 7499 case DIF_OP_NOT: 7500 case DIF_OP_MOV: 7501 case DIF_OP_ALLOCS: 7502 if (r1 >= nregs) 7503 err += efunc(pc, "invalid register %u\n", r1); 7504 if (r2 != 0) 7505 err += efunc(pc, "non-zero reserved bits\n"); 7506 if (rd >= nregs) 7507 err += efunc(pc, "invalid register %u\n", rd); 7508 if (rd == 0) 7509 err += efunc(pc, "cannot write to %r0\n"); 7510 break; 7511 case DIF_OP_LDSB: 7512 case DIF_OP_LDSH: 7513 case DIF_OP_LDSW: 7514 case DIF_OP_LDUB: 7515 case DIF_OP_LDUH: 7516 case DIF_OP_LDUW: 7517 case DIF_OP_LDX: 7518 if (r1 >= nregs) 7519 err += efunc(pc, "invalid register %u\n", r1); 7520 if (r2 != 0) 7521 err += efunc(pc, "non-zero reserved bits\n"); 7522 if (rd >= nregs) 7523 err += efunc(pc, "invalid register %u\n", rd); 7524 if (rd == 0) 7525 err += efunc(pc, "cannot write to %r0\n"); 7526 if (kcheckload) 7527 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 7528 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 7529 break; 7530 case DIF_OP_RLDSB: 7531 case DIF_OP_RLDSH: 7532 case DIF_OP_RLDSW: 7533 case DIF_OP_RLDUB: 7534 case DIF_OP_RLDUH: 7535 case DIF_OP_RLDUW: 7536 case DIF_OP_RLDX: 7537 if (r1 >= nregs) 7538 err += efunc(pc, "invalid register %u\n", r1); 7539 if (r2 != 0) 7540 err += efunc(pc, "non-zero reserved bits\n"); 7541 if (rd >= nregs) 7542 err += efunc(pc, "invalid register %u\n", rd); 7543 if (rd == 0) 7544 err += efunc(pc, "cannot write to %r0\n"); 7545 break; 7546 case DIF_OP_ULDSB: 7547 case DIF_OP_ULDSH: 7548 case DIF_OP_ULDSW: 7549 case DIF_OP_ULDUB: 7550 case DIF_OP_ULDUH: 7551 case DIF_OP_ULDUW: 7552 case DIF_OP_ULDX: 7553 if (r1 >= nregs) 7554 err += efunc(pc, "invalid register %u\n", r1); 7555 if (r2 != 0) 7556 err += 
efunc(pc, "non-zero reserved bits\n"); 7557 if (rd >= nregs) 7558 err += efunc(pc, "invalid register %u\n", rd); 7559 if (rd == 0) 7560 err += efunc(pc, "cannot write to %r0\n"); 7561 break; 7562 case DIF_OP_STB: 7563 case DIF_OP_STH: 7564 case DIF_OP_STW: 7565 case DIF_OP_STX: 7566 if (r1 >= nregs) 7567 err += efunc(pc, "invalid register %u\n", r1); 7568 if (r2 != 0) 7569 err += efunc(pc, "non-zero reserved bits\n"); 7570 if (rd >= nregs) 7571 err += efunc(pc, "invalid register %u\n", rd); 7572 if (rd == 0) 7573 err += efunc(pc, "cannot write to 0 address\n"); 7574 break; 7575 case DIF_OP_CMP: 7576 case DIF_OP_SCMP: 7577 if (r1 >= nregs) 7578 err += efunc(pc, "invalid register %u\n", r1); 7579 if (r2 >= nregs) 7580 err += efunc(pc, "invalid register %u\n", r2); 7581 if (rd != 0) 7582 err += efunc(pc, "non-zero reserved bits\n"); 7583 break; 7584 case DIF_OP_TST: 7585 if (r1 >= nregs) 7586 err += efunc(pc, "invalid register %u\n", r1); 7587 if (r2 != 0 || rd != 0) 7588 err += efunc(pc, "non-zero reserved bits\n"); 7589 break; 7590 case DIF_OP_BA: 7591 case DIF_OP_BE: 7592 case DIF_OP_BNE: 7593 case DIF_OP_BG: 7594 case DIF_OP_BGU: 7595 case DIF_OP_BGE: 7596 case DIF_OP_BGEU: 7597 case DIF_OP_BL: 7598 case DIF_OP_BLU: 7599 case DIF_OP_BLE: 7600 case DIF_OP_BLEU: 7601 if (label >= dp->dtdo_len) { 7602 err += efunc(pc, "invalid branch target %u\n", 7603 label); 7604 } 7605 if (label <= pc) { 7606 err += efunc(pc, "backward branch to %u\n", 7607 label); 7608 } 7609 break; 7610 case DIF_OP_RET: 7611 if (r1 != 0 || r2 != 0) 7612 err += efunc(pc, "non-zero reserved bits\n"); 7613 if (rd >= nregs) 7614 err += efunc(pc, "invalid register %u\n", rd); 7615 break; 7616 case DIF_OP_NOP: 7617 case DIF_OP_POPTS: 7618 case DIF_OP_FLUSHTS: 7619 if (r1 != 0 || r2 != 0 || rd != 0) 7620 err += efunc(pc, "non-zero reserved bits\n"); 7621 break; 7622 case DIF_OP_SETX: 7623 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 7624 err += efunc(pc, "invalid integer ref %u\n", 7625 DIF_INSTR_INTEGER(instr)); 7626 } 7627 if (rd >= nregs) 7628 err += efunc(pc, "invalid register %u\n", rd); 7629 if (rd == 0) 7630 err += efunc(pc, "cannot write to %r0\n"); 7631 break; 7632 case DIF_OP_SETS: 7633 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 7634 err += efunc(pc, "invalid string ref %u\n", 7635 DIF_INSTR_STRING(instr)); 7636 } 7637 if (rd >= nregs) 7638 err += efunc(pc, "invalid register %u\n", rd); 7639 if (rd == 0) 7640 err += efunc(pc, "cannot write to %r0\n"); 7641 break; 7642 case DIF_OP_LDGA: 7643 case DIF_OP_LDTA: 7644 if (r1 > DIF_VAR_ARRAY_MAX) 7645 err += efunc(pc, "invalid array %u\n", r1); 7646 if (r2 >= nregs) 7647 err += efunc(pc, "invalid register %u\n", r2); 7648 if (rd >= nregs) 7649 err += efunc(pc, "invalid register %u\n", rd); 7650 if (rd == 0) 7651 err += efunc(pc, "cannot write to %r0\n"); 7652 break; 7653 case DIF_OP_LDGS: 7654 case DIF_OP_LDTS: 7655 case DIF_OP_LDLS: 7656 case DIF_OP_LDGAA: 7657 case DIF_OP_LDTAA: 7658 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 7659 err += efunc(pc, "invalid variable %u\n", v); 7660 if (rd >= nregs) 7661 err += efunc(pc, "invalid register %u\n", rd); 7662 if (rd == 0) 7663 err += efunc(pc, "cannot write to %r0\n"); 7664 break; 7665 case DIF_OP_STGS: 7666 case DIF_OP_STTS: 7667 case DIF_OP_STLS: 7668 case DIF_OP_STGAA: 7669 case DIF_OP_STTAA: 7670 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 7671 err += efunc(pc, "invalid variable %u\n", v); 7672 if (rs >= nregs) 7673 err += efunc(pc, "invalid register %u\n", rd); 7674 break; 7675 case 
DIF_OP_CALL: 7676 if (subr > DIF_SUBR_MAX) 7677 err += efunc(pc, "invalid subr %u\n", subr); 7678 if (rd >= nregs) 7679 err += efunc(pc, "invalid register %u\n", rd); 7680 if (rd == 0) 7681 err += efunc(pc, "cannot write to %r0\n"); 7682 7683 if (subr == DIF_SUBR_COPYOUT || 7684 subr == DIF_SUBR_COPYOUTSTR) { 7685 dp->dtdo_destructive = 1; 7686 } 7687 break; 7688 case DIF_OP_PUSHTR: 7689 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 7690 err += efunc(pc, "invalid ref type %u\n", type); 7691 if (r2 >= nregs) 7692 err += efunc(pc, "invalid register %u\n", r2); 7693 if (rs >= nregs) 7694 err += efunc(pc, "invalid register %u\n", rs); 7695 break; 7696 case DIF_OP_PUSHTV: 7697 if (type != DIF_TYPE_CTF) 7698 err += efunc(pc, "invalid val type %u\n", type); 7699 if (r2 >= nregs) 7700 err += efunc(pc, "invalid register %u\n", r2); 7701 if (rs >= nregs) 7702 err += efunc(pc, "invalid register %u\n", rs); 7703 break; 7704 default: 7705 err += efunc(pc, "invalid opcode %u\n", 7706 DIF_INSTR_OP(instr)); 7707 } 7708 } 7709 7710 if (dp->dtdo_len != 0 && 7711 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7712 err += efunc(dp->dtdo_len - 1, 7713 "expected 'ret' as last DIF instruction\n"); 7714 } 7715 7716 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7717 /* 7718 * If we're not returning by reference, the size must be either 7719 * 0 or the size of one of the base types. 7720 */ 7721 switch (dp->dtdo_rtype.dtdt_size) { 7722 case 0: 7723 case sizeof (uint8_t): 7724 case sizeof (uint16_t): 7725 case sizeof (uint32_t): 7726 case sizeof (uint64_t): 7727 break; 7728 7729 default: 7730 err += efunc(dp->dtdo_len - 1, "bad return size"); 7731 } 7732 } 7733 7734 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7735 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7736 dtrace_diftype_t *vt, *et; 7737 uint_t id, ndx; 7738 7739 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7740 v->dtdv_scope != DIFV_SCOPE_THREAD && 7741 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7742 err += efunc(i, "unrecognized variable scope %d\n", 7743 v->dtdv_scope); 7744 break; 7745 } 7746 7747 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7748 v->dtdv_kind != DIFV_KIND_SCALAR) { 7749 err += efunc(i, "unrecognized variable type %d\n", 7750 v->dtdv_kind); 7751 break; 7752 } 7753 7754 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7755 err += efunc(i, "%d exceeds variable id limit\n", id); 7756 break; 7757 } 7758 7759 if (id < DIF_VAR_OTHER_UBASE) 7760 continue; 7761 7762 /* 7763 * For user-defined variables, we need to check that this 7764 * definition is identical to any previous definition that we 7765 * encountered. 
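 *
 * For example, if one clause in an enabling declares a global scalar
 * and a later clause uses the same variable as a by-ref string, the
 * second DIFO fails validation below ("changed variable type flags")
 * rather than silently aliasing the first definition's storage.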
7766 */ 7767 ndx = id - DIF_VAR_OTHER_UBASE; 7768 7769 switch (v->dtdv_scope) { 7770 case DIFV_SCOPE_GLOBAL: 7771 if (ndx < vstate->dtvs_nglobals) { 7772 dtrace_statvar_t *svar; 7773 7774 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 7775 existing = &svar->dtsv_var; 7776 } 7777 7778 break; 7779 7780 case DIFV_SCOPE_THREAD: 7781 if (ndx < vstate->dtvs_ntlocals) 7782 existing = &vstate->dtvs_tlocals[ndx]; 7783 break; 7784 7785 case DIFV_SCOPE_LOCAL: 7786 if (ndx < vstate->dtvs_nlocals) { 7787 dtrace_statvar_t *svar; 7788 7789 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 7790 existing = &svar->dtsv_var; 7791 } 7792 7793 break; 7794 } 7795 7796 vt = &v->dtdv_type; 7797 7798 if (vt->dtdt_flags & DIF_TF_BYREF) { 7799 if (vt->dtdt_size == 0) { 7800 err += efunc(i, "zero-sized variable\n"); 7801 break; 7802 } 7803 7804 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 7805 vt->dtdt_size > dtrace_global_maxsize) { 7806 err += efunc(i, "oversized by-ref global\n"); 7807 break; 7808 } 7809 } 7810 7811 if (existing == NULL || existing->dtdv_id == 0) 7812 continue; 7813 7814 ASSERT(existing->dtdv_id == v->dtdv_id); 7815 ASSERT(existing->dtdv_scope == v->dtdv_scope); 7816 7817 if (existing->dtdv_kind != v->dtdv_kind) 7818 err += efunc(i, "%d changed variable kind\n", id); 7819 7820 et = &existing->dtdv_type; 7821 7822 if (vt->dtdt_flags != et->dtdt_flags) { 7823 err += efunc(i, "%d changed variable type flags\n", id); 7824 break; 7825 } 7826 7827 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 7828 err += efunc(i, "%d changed variable type size\n", id); 7829 break; 7830 } 7831 } 7832 7833 return (err); 7834 } 7835 7836 /* 7837 * Validate a DTrace DIF object that is to be used as a helper. Helpers 7838 * are much more constrained than normal DIFOs. Specifically, they may 7839 * not: 7840 * 7841 * 1. Make calls to subroutines other than copyin(), copyinstr() or 7842 * miscellaneous string routines 7843 * 2. Access DTrace variables other than the args[] array, and the 7844 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 7845 * 3. Have thread-local variables. 7846 * 4. Have dynamic variables. 7847 */ 7848 static int 7849 dtrace_difo_validate_helper(dtrace_difo_t *dp) 7850 { 7851 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 7852 int err = 0; 7853 uint_t pc; 7854 7855 for (pc = 0; pc < dp->dtdo_len; pc++) { 7856 dif_instr_t instr = dp->dtdo_buf[pc]; 7857 7858 uint_t v = DIF_INSTR_VAR(instr); 7859 uint_t subr = DIF_INSTR_SUBR(instr); 7860 uint_t op = DIF_INSTR_OP(instr); 7861 7862 switch (op) { 7863 case DIF_OP_OR: 7864 case DIF_OP_XOR: 7865 case DIF_OP_AND: 7866 case DIF_OP_SLL: 7867 case DIF_OP_SRL: 7868 case DIF_OP_SRA: 7869 case DIF_OP_SUB: 7870 case DIF_OP_ADD: 7871 case DIF_OP_MUL: 7872 case DIF_OP_SDIV: 7873 case DIF_OP_UDIV: 7874 case DIF_OP_SREM: 7875 case DIF_OP_UREM: 7876 case DIF_OP_COPYS: 7877 case DIF_OP_NOT: 7878 case DIF_OP_MOV: 7879 case DIF_OP_RLDSB: 7880 case DIF_OP_RLDSH: 7881 case DIF_OP_RLDSW: 7882 case DIF_OP_RLDUB: 7883 case DIF_OP_RLDUH: 7884 case DIF_OP_RLDUW: 7885 case DIF_OP_RLDX: 7886 case DIF_OP_ULDSB: 7887 case DIF_OP_ULDSH: 7888 case DIF_OP_ULDSW: 7889 case DIF_OP_ULDUB: 7890 case DIF_OP_ULDUH: 7891 case DIF_OP_ULDUW: 7892 case DIF_OP_ULDX: 7893 case DIF_OP_STB: 7894 case DIF_OP_STH: 7895 case DIF_OP_STW: 7896 case DIF_OP_STX: 7897 case DIF_OP_ALLOCS: 7898 case DIF_OP_CMP: 7899 case DIF_OP_SCMP: 7900 case DIF_OP_TST: 7901 case DIF_OP_BA: 7902 case DIF_OP_BE: 7903 case DIF_OP_BNE: 7904 case DIF_OP_BG: 7905 case DIF_OP_BGU: 7906 case DIF_OP_BGE: 7907 case DIF_OP_BGEU: 7908 case DIF_OP_BL: 7909 case DIF_OP_BLU: 7910 case DIF_OP_BLE: 7911 case DIF_OP_BLEU: 7912 case DIF_OP_RET: 7913 case DIF_OP_NOP: 7914 case DIF_OP_POPTS: 7915 case DIF_OP_FLUSHTS: 7916 case DIF_OP_SETX: 7917 case DIF_OP_SETS: 7918 case DIF_OP_LDGA: 7919 case DIF_OP_LDLS: 7920 case DIF_OP_STGS: 7921 case DIF_OP_STLS: 7922 case DIF_OP_PUSHTR: 7923 case DIF_OP_PUSHTV: 7924 break; 7925 7926 case DIF_OP_LDGS: 7927 if (v >= DIF_VAR_OTHER_UBASE) 7928 break; 7929 7930 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7931 break; 7932 7933 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7934 v == DIF_VAR_PPID || v == DIF_VAR_TID || 7935 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 7936 v == DIF_VAR_UID || v == DIF_VAR_GID) 7937 break; 7938 7939 err += efunc(pc, "illegal variable %u\n", v); 7940 break; 7941 7942 case DIF_OP_LDTA: 7943 case DIF_OP_LDTS: 7944 case DIF_OP_LDGAA: 7945 case DIF_OP_LDTAA: 7946 err += efunc(pc, "illegal dynamic variable load\n"); 7947 break; 7948 7949 case DIF_OP_STTS: 7950 case DIF_OP_STGAA: 7951 case DIF_OP_STTAA: 7952 err += efunc(pc, "illegal dynamic variable store\n"); 7953 break; 7954 7955 case DIF_OP_CALL: 7956 if (subr == DIF_SUBR_ALLOCA || 7957 subr == DIF_SUBR_BCOPY || 7958 subr == DIF_SUBR_COPYIN || 7959 subr == DIF_SUBR_COPYINTO || 7960 subr == DIF_SUBR_COPYINSTR || 7961 subr == DIF_SUBR_INDEX || 7962 subr == DIF_SUBR_LLTOSTR || 7963 subr == DIF_SUBR_RINDEX || 7964 subr == DIF_SUBR_STRCHR || 7965 subr == DIF_SUBR_STRJOIN || 7966 subr == DIF_SUBR_STRRCHR || 7967 subr == DIF_SUBR_STRSTR || 7968 subr == DIF_SUBR_HTONS || 7969 subr == DIF_SUBR_HTONL || 7970 subr == DIF_SUBR_HTONLL || 7971 subr == DIF_SUBR_NTOHS || 7972 subr == DIF_SUBR_NTOHL || 7973 subr == DIF_SUBR_NTOHLL) 7974 break; 7975 7976 err += efunc(pc, "invalid subr %u\n", subr); 7977 break; 7978 7979 default: 7980 err += efunc(pc, "invalid opcode %u\n", 7981 DIF_INSTR_OP(instr)); 7982 } 7983 } 7984 7985 return (err); 7986 } 7987 7988 /* 7989 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7990 * basis; 0 if not. 
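 *
 * For example, a predicate such as execname == "foo" refers only to a
 * cacheable variable and performs no loads or stores, so its result
 * may be computed once per thread and reused; a predicate that
 * consults arg0 (or performs any memory load) must be evaluated on
 * every firing.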
7991 */ 7992 static int 7993 dtrace_difo_cacheable(dtrace_difo_t *dp) 7994 { 7995 int i; 7996 7997 if (dp == NULL) 7998 return (0); 7999 8000 for (i = 0; i < dp->dtdo_varlen; i++) { 8001 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8002 8003 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8004 continue; 8005 8006 switch (v->dtdv_id) { 8007 case DIF_VAR_CURTHREAD: 8008 case DIF_VAR_PID: 8009 case DIF_VAR_TID: 8010 case DIF_VAR_EXECNAME: 8011 case DIF_VAR_ZONENAME: 8012 break; 8013 8014 default: 8015 return (0); 8016 } 8017 } 8018 8019 /* 8020 * This DIF object may be cacheable. Now we need to look for any 8021 * array loading instructions, any memory loading instructions, or 8022 * any stores to thread-local variables. 8023 */ 8024 for (i = 0; i < dp->dtdo_len; i++) { 8025 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8026 8027 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8028 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8029 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8030 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8031 return (0); 8032 } 8033 8034 return (1); 8035 } 8036 8037 static void 8038 dtrace_difo_hold(dtrace_difo_t *dp) 8039 { 8040 int i; 8041 8042 ASSERT(MUTEX_HELD(&dtrace_lock)); 8043 8044 dp->dtdo_refcnt++; 8045 ASSERT(dp->dtdo_refcnt != 0); 8046 8047 /* 8048 * We need to check this DIF object for references to the variable 8049 * DIF_VAR_VTIMESTAMP. 8050 */ 8051 for (i = 0; i < dp->dtdo_varlen; i++) { 8052 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8053 8054 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8055 continue; 8056 8057 if (dtrace_vtime_references++ == 0) 8058 dtrace_vtime_enable(); 8059 } 8060 } 8061 8062 /* 8063 * This routine calculates the dynamic variable chunksize for a given DIF 8064 * object. The calculation is not fool-proof, and can probably be tricked by 8065 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8066 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8067 * if a dynamic variable size exceeds the chunksize. 
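 *
 * For example, for a thread-local store like self->ts = timestamp,
 * the computation below yields sizeof (dtrace_dynvar_t) plus one
 * additional dtrace_key_t (the two keys -- the thread pointer and the
 * variable id -- are themselves zero-sized), plus the eight bytes of
 * stored data for the 64-bit value, rounded up to uint64_t alignment.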
8068 */ 8069 static void 8070 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8071 { 8072 uint64_t sval; 8073 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8074 const dif_instr_t *text = dp->dtdo_buf; 8075 uint_t pc, srd = 0; 8076 uint_t ttop = 0; 8077 size_t size, ksize; 8078 uint_t id, i; 8079 8080 for (pc = 0; pc < dp->dtdo_len; pc++) { 8081 dif_instr_t instr = text[pc]; 8082 uint_t op = DIF_INSTR_OP(instr); 8083 uint_t rd = DIF_INSTR_RD(instr); 8084 uint_t r1 = DIF_INSTR_R1(instr); 8085 uint_t nkeys = 0; 8086 uchar_t scope; 8087 8088 dtrace_key_t *key = tupregs; 8089 8090 switch (op) { 8091 case DIF_OP_SETX: 8092 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8093 srd = rd; 8094 continue; 8095 8096 case DIF_OP_STTS: 8097 key = &tupregs[DIF_DTR_NREGS]; 8098 key[0].dttk_size = 0; 8099 key[1].dttk_size = 0; 8100 nkeys = 2; 8101 scope = DIFV_SCOPE_THREAD; 8102 break; 8103 8104 case DIF_OP_STGAA: 8105 case DIF_OP_STTAA: 8106 nkeys = ttop; 8107 8108 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8109 key[nkeys++].dttk_size = 0; 8110 8111 key[nkeys++].dttk_size = 0; 8112 8113 if (op == DIF_OP_STTAA) { 8114 scope = DIFV_SCOPE_THREAD; 8115 } else { 8116 scope = DIFV_SCOPE_GLOBAL; 8117 } 8118 8119 break; 8120 8121 case DIF_OP_PUSHTR: 8122 if (ttop == DIF_DTR_NREGS) 8123 return; 8124 8125 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8126 /* 8127 * If the register for the size of the "pushtr" 8128 * is %r0 (or the value is 0) and the type is 8129 * a string, we'll use the system-wide default 8130 * string size. 8131 */ 8132 tupregs[ttop++].dttk_size = 8133 dtrace_strsize_default; 8134 } else { 8135 if (srd == 0) 8136 return; 8137 8138 tupregs[ttop++].dttk_size = sval; 8139 } 8140 8141 break; 8142 8143 case DIF_OP_PUSHTV: 8144 if (ttop == DIF_DTR_NREGS) 8145 return; 8146 8147 tupregs[ttop++].dttk_size = 0; 8148 break; 8149 8150 case DIF_OP_FLUSHTS: 8151 ttop = 0; 8152 break; 8153 8154 case DIF_OP_POPTS: 8155 if (ttop != 0) 8156 ttop--; 8157 break; 8158 } 8159 8160 sval = 0; 8161 srd = 0; 8162 8163 if (nkeys == 0) 8164 continue; 8165 8166 /* 8167 * We have a dynamic variable allocation; calculate its size. 8168 */ 8169 for (ksize = 0, i = 0; i < nkeys; i++) 8170 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 8171 8172 size = sizeof (dtrace_dynvar_t); 8173 size += sizeof (dtrace_key_t) * (nkeys - 1); 8174 size += ksize; 8175 8176 /* 8177 * Now we need to determine the size of the stored data. 8178 */ 8179 id = DIF_INSTR_VAR(instr); 8180 8181 for (i = 0; i < dp->dtdo_varlen; i++) { 8182 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8183 8184 if (v->dtdv_id == id && v->dtdv_scope == scope) { 8185 size += v->dtdv_type.dtdt_size; 8186 break; 8187 } 8188 } 8189 8190 if (i == dp->dtdo_varlen) 8191 return; 8192 8193 /* 8194 * We have the size. If this is larger than the chunk size 8195 * for our dynamic variable state, reset the chunk size. 
8196 */ 8197 size = P2ROUNDUP(size, sizeof (uint64_t)); 8198 8199 if (size > vstate->dtvs_dynvars.dtds_chunksize) 8200 vstate->dtvs_dynvars.dtds_chunksize = size; 8201 } 8202 } 8203 8204 static void 8205 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8206 { 8207 int i, oldsvars, osz, nsz, otlocals, ntlocals; 8208 uint_t id; 8209 8210 ASSERT(MUTEX_HELD(&dtrace_lock)); 8211 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 8212 8213 for (i = 0; i < dp->dtdo_varlen; i++) { 8214 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8215 dtrace_statvar_t *svar, ***svarp; 8216 size_t dsize = 0; 8217 uint8_t scope = v->dtdv_scope; 8218 int *np; 8219 8220 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8221 continue; 8222 8223 id -= DIF_VAR_OTHER_UBASE; 8224 8225 switch (scope) { 8226 case DIFV_SCOPE_THREAD: 8227 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 8228 dtrace_difv_t *tlocals; 8229 8230 if ((ntlocals = (otlocals << 1)) == 0) 8231 ntlocals = 1; 8232 8233 osz = otlocals * sizeof (dtrace_difv_t); 8234 nsz = ntlocals * sizeof (dtrace_difv_t); 8235 8236 tlocals = kmem_zalloc(nsz, KM_SLEEP); 8237 8238 if (osz != 0) { 8239 bcopy(vstate->dtvs_tlocals, 8240 tlocals, osz); 8241 kmem_free(vstate->dtvs_tlocals, osz); 8242 } 8243 8244 vstate->dtvs_tlocals = tlocals; 8245 vstate->dtvs_ntlocals = ntlocals; 8246 } 8247 8248 vstate->dtvs_tlocals[id] = *v; 8249 continue; 8250 8251 case DIFV_SCOPE_LOCAL: 8252 np = &vstate->dtvs_nlocals; 8253 svarp = &vstate->dtvs_locals; 8254 8255 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8256 dsize = NCPU * (v->dtdv_type.dtdt_size + 8257 sizeof (uint64_t)); 8258 else 8259 dsize = NCPU * sizeof (uint64_t); 8260 8261 break; 8262 8263 case DIFV_SCOPE_GLOBAL: 8264 np = &vstate->dtvs_nglobals; 8265 svarp = &vstate->dtvs_globals; 8266 8267 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8268 dsize = v->dtdv_type.dtdt_size + 8269 sizeof (uint64_t); 8270 8271 break; 8272 8273 default: 8274 ASSERT(0); 8275 } 8276 8277 while (id >= (oldsvars = *np)) { 8278 dtrace_statvar_t **statics; 8279 int newsvars, oldsize, newsize; 8280 8281 if ((newsvars = (oldsvars << 1)) == 0) 8282 newsvars = 1; 8283 8284 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 8285 newsize = newsvars * sizeof (dtrace_statvar_t *); 8286 8287 statics = kmem_zalloc(newsize, KM_SLEEP); 8288 8289 if (oldsize != 0) { 8290 bcopy(*svarp, statics, oldsize); 8291 kmem_free(*svarp, oldsize); 8292 } 8293 8294 *svarp = statics; 8295 *np = newsvars; 8296 } 8297 8298 if ((svar = (*svarp)[id]) == NULL) { 8299 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 8300 svar->dtsv_var = *v; 8301 8302 if ((svar->dtsv_size = dsize) != 0) { 8303 svar->dtsv_data = (uint64_t)(uintptr_t) 8304 kmem_zalloc(dsize, KM_SLEEP); 8305 } 8306 8307 (*svarp)[id] = svar; 8308 } 8309 8310 svar->dtsv_refcnt++; 8311 } 8312 8313 dtrace_difo_chunksize(dp, vstate); 8314 dtrace_difo_hold(dp); 8315 } 8316 8317 static dtrace_difo_t * 8318 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8319 { 8320 dtrace_difo_t *new; 8321 size_t sz; 8322 8323 ASSERT(dp->dtdo_buf != NULL); 8324 ASSERT(dp->dtdo_refcnt != 0); 8325 8326 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 8327 8328 ASSERT(dp->dtdo_buf != NULL); 8329 sz = dp->dtdo_len * sizeof (dif_instr_t); 8330 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 8331 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 8332 new->dtdo_len = dp->dtdo_len; 8333 8334 if (dp->dtdo_strtab != NULL) { 8335 ASSERT(dp->dtdo_strlen != 0); 8336 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 8337 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 8338 new->dtdo_strlen = dp->dtdo_strlen; 8339 } 8340 8341 if (dp->dtdo_inttab != NULL) { 8342 ASSERT(dp->dtdo_intlen != 0); 8343 sz = dp->dtdo_intlen * sizeof (uint64_t); 8344 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8345 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8346 new->dtdo_intlen = dp->dtdo_intlen; 8347 } 8348 8349 if (dp->dtdo_vartab != NULL) { 8350 ASSERT(dp->dtdo_varlen != 0); 8351 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8352 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8353 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8354 new->dtdo_varlen = dp->dtdo_varlen; 8355 } 8356 8357 dtrace_difo_init(new, vstate); 8358 return (new); 8359 } 8360 8361 static void 8362 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8363 { 8364 int i; 8365 8366 ASSERT(dp->dtdo_refcnt == 0); 8367 8368 for (i = 0; i < dp->dtdo_varlen; i++) { 8369 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8370 dtrace_statvar_t *svar, **svarp; 8371 uint_t id; 8372 uint8_t scope = v->dtdv_scope; 8373 int *np; 8374 8375 switch (scope) { 8376 case DIFV_SCOPE_THREAD: 8377 continue; 8378 8379 case DIFV_SCOPE_LOCAL: 8380 np = &vstate->dtvs_nlocals; 8381 svarp = vstate->dtvs_locals; 8382 break; 8383 8384 case DIFV_SCOPE_GLOBAL: 8385 np = &vstate->dtvs_nglobals; 8386 svarp = vstate->dtvs_globals; 8387 break; 8388 8389 default: 8390 ASSERT(0); 8391 } 8392 8393 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8394 continue; 8395 8396 id -= DIF_VAR_OTHER_UBASE; 8397 ASSERT(id < *np); 8398 8399 svar = svarp[id]; 8400 ASSERT(svar != NULL); 8401 ASSERT(svar->dtsv_refcnt > 0); 8402 8403 if (--svar->dtsv_refcnt > 0) 8404 continue; 8405 8406 if (svar->dtsv_size != 0) { 8407 ASSERT(svar->dtsv_data != NULL); 8408 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8409 svar->dtsv_size); 8410 } 8411 8412 kmem_free(svar, sizeof (dtrace_statvar_t)); 8413 svarp[id] = NULL; 8414 } 8415 8416 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8417 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8418 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8419 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8420 8421 kmem_free(dp, sizeof (dtrace_difo_t)); 8422 } 8423 8424 static void 8425 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8426 { 8427 int i; 8428 8429 ASSERT(MUTEX_HELD(&dtrace_lock)); 8430 ASSERT(dp->dtdo_refcnt != 0); 8431 8432 for (i = 0; i < dp->dtdo_varlen; i++) { 8433 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8434 8435 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8436 continue; 8437 8438 ASSERT(dtrace_vtime_references > 0); 8439 if (--dtrace_vtime_references == 0) 8440 dtrace_vtime_disable(); 8441 } 8442 8443 if (--dp->dtdo_refcnt == 0) 8444 dtrace_difo_destroy(dp, vstate); 8445 } 8446 8447 /* 8448 * DTrace Format Functions 8449 */ 8450 static uint16_t 8451 dtrace_format_add(dtrace_state_t *state, char *str) 8452 { 8453 char *fmt, **new; 8454 uint16_t ndx, len = strlen(str) + 1; 8455 8456 fmt = kmem_zalloc(len, KM_SLEEP); 8457 bcopy(str, fmt, len); 8458 8459 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 8460 if (state->dts_formats[ndx] == NULL) { 8461 state->dts_formats[ndx] = fmt; 8462 return (ndx + 1); 8463 } 8464 } 8465 8466 if (state->dts_nformats == USHRT_MAX) { 8467 /* 8468 * This is only likely if a denial-of-service attack is being 8469 * attempted. As such, it's okay to fail silently here. 
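 *
 * (Note, too, that format indices as returned to our caller are 1-based:
 * dtrace_format_add() returns ndx + 1, reserving 0 to denote "no
 * format".)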
8470 */ 8471 kmem_free(fmt, len); 8472 return (0); 8473 } 8474 8475 /* 8476 * For simplicity, we always resize the formats array to be exactly the 8477 * number of formats. 8478 */ 8479 ndx = state->dts_nformats++; 8480 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 8481 8482 if (state->dts_formats != NULL) { 8483 ASSERT(ndx != 0); 8484 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 8485 kmem_free(state->dts_formats, ndx * sizeof (char *)); 8486 } 8487 8488 state->dts_formats = new; 8489 state->dts_formats[ndx] = fmt; 8490 8491 return (ndx + 1); 8492 } 8493 8494 static void 8495 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 8496 { 8497 char *fmt; 8498 8499 ASSERT(state->dts_formats != NULL); 8500 ASSERT(format <= state->dts_nformats); 8501 ASSERT(state->dts_formats[format - 1] != NULL); 8502 8503 fmt = state->dts_formats[format - 1]; 8504 kmem_free(fmt, strlen(fmt) + 1); 8505 state->dts_formats[format - 1] = NULL; 8506 } 8507 8508 static void 8509 dtrace_format_destroy(dtrace_state_t *state) 8510 { 8511 int i; 8512 8513 if (state->dts_nformats == 0) { 8514 ASSERT(state->dts_formats == NULL); 8515 return; 8516 } 8517 8518 ASSERT(state->dts_formats != NULL); 8519 8520 for (i = 0; i < state->dts_nformats; i++) { 8521 char *fmt = state->dts_formats[i]; 8522 8523 if (fmt == NULL) 8524 continue; 8525 8526 kmem_free(fmt, strlen(fmt) + 1); 8527 } 8528 8529 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 8530 state->dts_nformats = 0; 8531 state->dts_formats = NULL; 8532 } 8533 8534 /* 8535 * DTrace Predicate Functions 8536 */ 8537 static dtrace_predicate_t * 8538 dtrace_predicate_create(dtrace_difo_t *dp) 8539 { 8540 dtrace_predicate_t *pred; 8541 8542 ASSERT(MUTEX_HELD(&dtrace_lock)); 8543 ASSERT(dp->dtdo_refcnt != 0); 8544 8545 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 8546 pred->dtp_difo = dp; 8547 pred->dtp_refcnt = 1; 8548 8549 if (!dtrace_difo_cacheable(dp)) 8550 return (pred); 8551 8552 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 8553 /* 8554 * This is only theoretically possible -- we have had 2^32 8555 * cacheable predicates on this machine. We cannot allow any 8556 * more predicates to become cacheable: as unlikely as it is, 8557 * there may be a thread caching a (now stale) predicate cache 8558 * ID. 
(N.B.: the temptation is being successfully resisted to 8559 * have this cmn_err() "Holy shit -- we executed this code!") 8560 */ 8561 return (pred); 8562 } 8563 8564 pred->dtp_cacheid = dtrace_predcache_id++; 8565 8566 return (pred); 8567 } 8568 8569 static void 8570 dtrace_predicate_hold(dtrace_predicate_t *pred) 8571 { 8572 ASSERT(MUTEX_HELD(&dtrace_lock)); 8573 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 8574 ASSERT(pred->dtp_refcnt > 0); 8575 8576 pred->dtp_refcnt++; 8577 } 8578 8579 static void 8580 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 8581 { 8582 dtrace_difo_t *dp = pred->dtp_difo; 8583 8584 ASSERT(MUTEX_HELD(&dtrace_lock)); 8585 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 8586 ASSERT(pred->dtp_refcnt > 0); 8587 8588 if (--pred->dtp_refcnt == 0) { 8589 dtrace_difo_release(pred->dtp_difo, vstate); 8590 kmem_free(pred, sizeof (dtrace_predicate_t)); 8591 } 8592 } 8593 8594 /* 8595 * DTrace Action Description Functions 8596 */ 8597 static dtrace_actdesc_t * 8598 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 8599 uint64_t uarg, uint64_t arg) 8600 { 8601 dtrace_actdesc_t *act; 8602 8603 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 8604 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 8605 8606 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 8607 act->dtad_kind = kind; 8608 act->dtad_ntuple = ntuple; 8609 act->dtad_uarg = uarg; 8610 act->dtad_arg = arg; 8611 act->dtad_refcnt = 1; 8612 8613 return (act); 8614 } 8615 8616 static void 8617 dtrace_actdesc_hold(dtrace_actdesc_t *act) 8618 { 8619 ASSERT(act->dtad_refcnt >= 1); 8620 act->dtad_refcnt++; 8621 } 8622 8623 static void 8624 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 8625 { 8626 dtrace_actkind_t kind = act->dtad_kind; 8627 dtrace_difo_t *dp; 8628 8629 ASSERT(act->dtad_refcnt >= 1); 8630 8631 if (--act->dtad_refcnt != 0) 8632 return; 8633 8634 if ((dp = act->dtad_difo) != NULL) 8635 dtrace_difo_release(dp, vstate); 8636 8637 if (DTRACEACT_ISPRINTFLIKE(kind)) { 8638 char *str = (char *)(uintptr_t)act->dtad_arg; 8639 8640 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 8641 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 8642 8643 if (str != NULL) 8644 kmem_free(str, strlen(str) + 1); 8645 } 8646 8647 kmem_free(act, sizeof (dtrace_actdesc_t)); 8648 } 8649 8650 /* 8651 * DTrace ECB Functions 8652 */ 8653 static dtrace_ecb_t * 8654 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 8655 { 8656 dtrace_ecb_t *ecb; 8657 dtrace_epid_t epid; 8658 8659 ASSERT(MUTEX_HELD(&dtrace_lock)); 8660 8661 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 8662 ecb->dte_predicate = NULL; 8663 ecb->dte_probe = probe; 8664 8665 /* 8666 * The default size is the size of the default action: recording 8667 * the epid. 
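 *
 * (As an illustrative sketch -- not normative: once an 8-byte record is
 * added, dtrace_ecb_resize() will lay the ECB out as a 4-byte EPID,
 * 4 bytes of alignment padding and the 8-byte payload, for a dte_size
 * of 16 bytes.)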
8668 */ 8669 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8670 ecb->dte_alignment = sizeof (dtrace_epid_t); 8671 8672 epid = state->dts_epid++; 8673 8674 if (epid - 1 >= state->dts_necbs) { 8675 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 8676 int necbs = state->dts_necbs << 1; 8677 8678 ASSERT(epid == state->dts_necbs + 1); 8679 8680 if (necbs == 0) { 8681 ASSERT(oecbs == NULL); 8682 necbs = 1; 8683 } 8684 8685 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 8686 8687 if (oecbs != NULL) 8688 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 8689 8690 dtrace_membar_producer(); 8691 state->dts_ecbs = ecbs; 8692 8693 if (oecbs != NULL) { 8694 /* 8695 * If this state is active, we must dtrace_sync() 8696 * before we can free the old dts_ecbs array: we're 8697 * coming in hot, and there may be active ring 8698 * buffer processing (which indexes into the dts_ecbs 8699 * array) on another CPU. 8700 */ 8701 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 8702 dtrace_sync(); 8703 8704 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 8705 } 8706 8707 dtrace_membar_producer(); 8708 state->dts_necbs = necbs; 8709 } 8710 8711 ecb->dte_state = state; 8712 8713 ASSERT(state->dts_ecbs[epid - 1] == NULL); 8714 dtrace_membar_producer(); 8715 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8716 8717 return (ecb); 8718 } 8719 8720 static void 8721 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8722 { 8723 dtrace_probe_t *probe = ecb->dte_probe; 8724 8725 ASSERT(MUTEX_HELD(&cpu_lock)); 8726 ASSERT(MUTEX_HELD(&dtrace_lock)); 8727 ASSERT(ecb->dte_next == NULL); 8728 8729 if (probe == NULL) { 8730 /* 8731 * This is the NULL probe -- there's nothing to do. 8732 */ 8733 return; 8734 } 8735 8736 if (probe->dtpr_ecb == NULL) { 8737 dtrace_provider_t *prov = probe->dtpr_provider; 8738 8739 /* 8740 * We're the first ECB on this probe. 8741 */ 8742 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8743 8744 if (ecb->dte_predicate != NULL) 8745 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8746 8747 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8748 probe->dtpr_id, probe->dtpr_arg); 8749 } else { 8750 /* 8751 * This probe is already active. Swing the last pointer to 8752 * point to the new ECB, and issue a dtrace_sync() to assure 8753 * that all CPUs have seen the change. 8754 */ 8755 ASSERT(probe->dtpr_ecb_last != NULL); 8756 probe->dtpr_ecb_last->dte_next = ecb; 8757 probe->dtpr_ecb_last = ecb; 8758 probe->dtpr_predcache = 0; 8759 8760 dtrace_sync(); 8761 } 8762 } 8763 8764 static void 8765 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8766 { 8767 uint32_t maxalign = sizeof (dtrace_epid_t); 8768 uint32_t align = sizeof (uint8_t), offs, diff; 8769 dtrace_action_t *act; 8770 int wastuple = 0; 8771 uint32_t aggbase = UINT32_MAX; 8772 dtrace_state_t *state = ecb->dte_state; 8773 8774 /* 8775 * If we record anything, we always record the epid. (And we always 8776 * record it first.) 8777 */ 8778 offs = sizeof (dtrace_epid_t); 8779 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8780 8781 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8782 dtrace_recdesc_t *rec = &act->dta_rec; 8783 8784 if ((align = rec->dtrd_alignment) > maxalign) 8785 maxalign = align; 8786 8787 if (!wastuple && act->dta_intuple) { 8788 /* 8789 * This is the first record in a tuple. Align the 8790 * offset to be at offset 4 in an 8-byte aligned 8791 * block. 
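 *
 * (Illustrative arithmetic: with offs at 8, diff becomes (8 + 4) & 7 = 4,
 * so offs advances to 12 and aggbase lands at 8 -- the 4-byte aggregation
 * ID occupies the first word of an 8-byte aligned block, with the tuple
 * keys beginning at offset 4 within that block.)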
8792 */ 8793 diff = offs + sizeof (dtrace_aggid_t); 8794 8795 if (diff = (diff & (sizeof (uint64_t) - 1))) 8796 offs += sizeof (uint64_t) - diff; 8797 8798 aggbase = offs - sizeof (dtrace_aggid_t); 8799 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 8800 } 8801 8802 /*LINTED*/ 8803 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 8804 /* 8805 * The current offset is not properly aligned; align it. 8806 */ 8807 offs += align - diff; 8808 } 8809 8810 rec->dtrd_offset = offs; 8811 8812 if (offs + rec->dtrd_size > ecb->dte_needed) { 8813 ecb->dte_needed = offs + rec->dtrd_size; 8814 8815 if (ecb->dte_needed > state->dts_needed) 8816 state->dts_needed = ecb->dte_needed; 8817 } 8818 8819 if (DTRACEACT_ISAGG(act->dta_kind)) { 8820 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8821 dtrace_action_t *first = agg->dtag_first, *prev; 8822 8823 ASSERT(rec->dtrd_size != 0 && first != NULL); 8824 ASSERT(wastuple); 8825 ASSERT(aggbase != UINT32_MAX); 8826 8827 agg->dtag_base = aggbase; 8828 8829 while ((prev = first->dta_prev) != NULL && 8830 DTRACEACT_ISAGG(prev->dta_kind)) { 8831 agg = (dtrace_aggregation_t *)prev; 8832 first = agg->dtag_first; 8833 } 8834 8835 if (prev != NULL) { 8836 offs = prev->dta_rec.dtrd_offset + 8837 prev->dta_rec.dtrd_size; 8838 } else { 8839 offs = sizeof (dtrace_epid_t); 8840 } 8841 wastuple = 0; 8842 } else { 8843 if (!act->dta_intuple) 8844 ecb->dte_size = offs + rec->dtrd_size; 8845 8846 offs += rec->dtrd_size; 8847 } 8848 8849 wastuple = act->dta_intuple; 8850 } 8851 8852 if ((act = ecb->dte_action) != NULL && 8853 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8854 ecb->dte_size == sizeof (dtrace_epid_t)) { 8855 /* 8856 * If the size is still sizeof (dtrace_epid_t), then all 8857 * actions store no data; set the size to 0. 8858 */ 8859 ecb->dte_alignment = maxalign; 8860 ecb->dte_size = 0; 8861 8862 /* 8863 * If the needed space is still sizeof (dtrace_epid_t), then 8864 * all actions need no additional space; set the needed 8865 * size to 0. 8866 */ 8867 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8868 ecb->dte_needed = 0; 8869 8870 return; 8871 } 8872 8873 /* 8874 * Set our alignment, and make sure that the dte_size and dte_needed 8875 * are aligned to the size of an EPID. 
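 *
 * (This is the usual power-of-two rounding idiom: with a 4-byte
 * dtrace_epid_t, (size + 3) & ~3 -- a 13-byte size, for example,
 * becomes 16.)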
8876 */ 8877 ecb->dte_alignment = maxalign; 8878 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8879 ~(sizeof (dtrace_epid_t) - 1); 8880 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8881 ~(sizeof (dtrace_epid_t) - 1); 8882 ASSERT(ecb->dte_size <= ecb->dte_needed); 8883 } 8884 8885 static dtrace_action_t * 8886 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8887 { 8888 dtrace_aggregation_t *agg; 8889 size_t size = sizeof (uint64_t); 8890 int ntuple = desc->dtad_ntuple; 8891 dtrace_action_t *act; 8892 dtrace_recdesc_t *frec; 8893 dtrace_aggid_t aggid; 8894 dtrace_state_t *state = ecb->dte_state; 8895 8896 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8897 agg->dtag_ecb = ecb; 8898 8899 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8900 8901 switch (desc->dtad_kind) { 8902 case DTRACEAGG_MIN: 8903 agg->dtag_initial = UINT64_MAX; 8904 agg->dtag_aggregate = dtrace_aggregate_min; 8905 break; 8906 8907 case DTRACEAGG_MAX: 8908 agg->dtag_aggregate = dtrace_aggregate_max; 8909 break; 8910 8911 case DTRACEAGG_COUNT: 8912 agg->dtag_aggregate = dtrace_aggregate_count; 8913 break; 8914 8915 case DTRACEAGG_QUANTIZE: 8916 agg->dtag_aggregate = dtrace_aggregate_quantize; 8917 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8918 sizeof (uint64_t); 8919 break; 8920 8921 case DTRACEAGG_LQUANTIZE: { 8922 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8923 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8924 8925 agg->dtag_initial = desc->dtad_arg; 8926 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8927 8928 if (step == 0 || levels == 0) 8929 goto err; 8930 8931 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8932 break; 8933 } 8934 8935 case DTRACEAGG_AVG: 8936 agg->dtag_aggregate = dtrace_aggregate_avg; 8937 size = sizeof (uint64_t) * 2; 8938 break; 8939 8940 case DTRACEAGG_SUM: 8941 agg->dtag_aggregate = dtrace_aggregate_sum; 8942 break; 8943 8944 default: 8945 goto err; 8946 } 8947 8948 agg->dtag_action.dta_rec.dtrd_size = size; 8949 8950 if (ntuple == 0) 8951 goto err; 8952 8953 /* 8954 * We must make sure that we have enough actions for the n-tuple. 8955 */ 8956 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8957 if (DTRACEACT_ISAGG(act->dta_kind)) 8958 break; 8959 8960 if (--ntuple == 0) { 8961 /* 8962 * This is the action with which our n-tuple begins. 8963 */ 8964 agg->dtag_first = act; 8965 goto success; 8966 } 8967 } 8968 8969 /* 8970 * This n-tuple is short by ntuple elements. Return failure. 8971 */ 8972 ASSERT(ntuple != 0); 8973 err: 8974 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8975 return (NULL); 8976 8977 success: 8978 /* 8979 * If the last action in the tuple has a size of zero, it's actually 8980 * an expression argument for the aggregating action. 8981 */ 8982 ASSERT(ecb->dte_action_last != NULL); 8983 act = ecb->dte_action_last; 8984 8985 if (act->dta_kind == DTRACEACT_DIFEXPR) { 8986 ASSERT(act->dta_difo != NULL); 8987 8988 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 8989 agg->dtag_hasarg = 1; 8990 } 8991 8992 /* 8993 * We need to allocate an id for this aggregation. 
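 *
 * (The ID is drawn from the per-state dts_aggid_arena, a vmem arena of
 * small integers beginning at 1; because IDs are handed out densely,
 * aggid - 1 can serve directly as an index into dts_aggregations.)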
8994 */ 8995 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8996 VM_BESTFIT | VM_SLEEP); 8997 8998 if (aggid - 1 >= state->dts_naggregations) { 8999 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9000 dtrace_aggregation_t **aggs; 9001 int naggs = state->dts_naggregations << 1; 9002 int onaggs = state->dts_naggregations; 9003 9004 ASSERT(aggid == state->dts_naggregations + 1); 9005 9006 if (naggs == 0) { 9007 ASSERT(oaggs == NULL); 9008 naggs = 1; 9009 } 9010 9011 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9012 9013 if (oaggs != NULL) { 9014 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9015 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9016 } 9017 9018 state->dts_aggregations = aggs; 9019 state->dts_naggregations = naggs; 9020 } 9021 9022 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9023 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9024 9025 frec = &agg->dtag_first->dta_rec; 9026 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9027 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9028 9029 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9030 ASSERT(!act->dta_intuple); 9031 act->dta_intuple = 1; 9032 } 9033 9034 return (&agg->dtag_action); 9035 } 9036 9037 static void 9038 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9039 { 9040 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9041 dtrace_state_t *state = ecb->dte_state; 9042 dtrace_aggid_t aggid = agg->dtag_id; 9043 9044 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9045 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9046 9047 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9048 state->dts_aggregations[aggid - 1] = NULL; 9049 9050 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9051 } 9052 9053 static int 9054 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9055 { 9056 dtrace_action_t *action, *last; 9057 dtrace_difo_t *dp = desc->dtad_difo; 9058 uint32_t size = 0, align = sizeof (uint8_t), mask; 9059 uint16_t format = 0; 9060 dtrace_recdesc_t *rec; 9061 dtrace_state_t *state = ecb->dte_state; 9062 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9063 uint64_t arg = desc->dtad_arg; 9064 9065 ASSERT(MUTEX_HELD(&dtrace_lock)); 9066 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9067 9068 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9069 /* 9070 * If this is an aggregating action, there must be neither 9071 * a speculate nor a commit on the action chain. 9072 */ 9073 dtrace_action_t *act; 9074 9075 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9076 if (act->dta_kind == DTRACEACT_COMMIT) 9077 return (EINVAL); 9078 9079 if (act->dta_kind == DTRACEACT_SPECULATE) 9080 return (EINVAL); 9081 } 9082 9083 action = dtrace_ecb_aggregation_create(ecb, desc); 9084 9085 if (action == NULL) 9086 return (EINVAL); 9087 } else { 9088 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9089 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9090 dp != NULL && dp->dtdo_destructive)) { 9091 state->dts_destructive = 1; 9092 } 9093 9094 switch (desc->dtad_kind) { 9095 case DTRACEACT_PRINTF: 9096 case DTRACEACT_PRINTA: 9097 case DTRACEACT_SYSTEM: 9098 case DTRACEACT_FREOPEN: 9099 /* 9100 * We know that our arg is a string -- turn it into a 9101 * format. 
9102 */ 9103 if (arg == NULL) { 9104 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 9105 format = 0; 9106 } else { 9107 ASSERT(arg != NULL); 9108 ASSERT(arg > KERNELBASE); 9109 format = dtrace_format_add(state, 9110 (char *)(uintptr_t)arg); 9111 } 9112 9113 /*FALLTHROUGH*/ 9114 case DTRACEACT_LIBACT: 9115 case DTRACEACT_DIFEXPR: 9116 if (dp == NULL) 9117 return (EINVAL); 9118 9119 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9120 break; 9121 9122 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 9123 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9124 return (EINVAL); 9125 9126 size = opt[DTRACEOPT_STRSIZE]; 9127 } 9128 9129 break; 9130 9131 case DTRACEACT_STACK: 9132 if ((nframes = arg) == 0) { 9133 nframes = opt[DTRACEOPT_STACKFRAMES]; 9134 ASSERT(nframes > 0); 9135 arg = nframes; 9136 } 9137 9138 size = nframes * sizeof (pc_t); 9139 break; 9140 9141 case DTRACEACT_JSTACK: 9142 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 9143 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 9144 9145 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 9146 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 9147 9148 arg = DTRACE_USTACK_ARG(nframes, strsize); 9149 9150 /*FALLTHROUGH*/ 9151 case DTRACEACT_USTACK: 9152 if (desc->dtad_kind != DTRACEACT_JSTACK && 9153 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 9154 strsize = DTRACE_USTACK_STRSIZE(arg); 9155 nframes = opt[DTRACEOPT_USTACKFRAMES]; 9156 ASSERT(nframes > 0); 9157 arg = DTRACE_USTACK_ARG(nframes, strsize); 9158 } 9159 9160 /* 9161 * Save a slot for the pid. 9162 */ 9163 size = (nframes + 1) * sizeof (uint64_t); 9164 size += DTRACE_USTACK_STRSIZE(arg); 9165 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 9166 9167 break; 9168 9169 case DTRACEACT_SYM: 9170 case DTRACEACT_MOD: 9171 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 9172 sizeof (uint64_t)) || 9173 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9174 return (EINVAL); 9175 break; 9176 9177 case DTRACEACT_USYM: 9178 case DTRACEACT_UMOD: 9179 case DTRACEACT_UADDR: 9180 if (dp == NULL || 9181 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 9182 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9183 return (EINVAL); 9184 9185 /* 9186 * We have a slot for the pid, plus a slot for the 9187 * argument. To keep things simple (aligned with 9188 * bitness-neutral sizing), we store each as a 64-bit 9189 * quantity. 
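 *
 * (The record is therefore 16 bytes: the pid in the first 64-bit slot,
 * the value to be resolved in the second.)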
9190 */ 9191 size = 2 * sizeof (uint64_t); 9192 break; 9193 9194 case DTRACEACT_STOP: 9195 case DTRACEACT_BREAKPOINT: 9196 case DTRACEACT_PANIC: 9197 break; 9198 9199 case DTRACEACT_CHILL: 9200 case DTRACEACT_DISCARD: 9201 case DTRACEACT_RAISE: 9202 if (dp == NULL) 9203 return (EINVAL); 9204 break; 9205 9206 case DTRACEACT_EXIT: 9207 if (dp == NULL || 9208 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 9209 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9210 return (EINVAL); 9211 break; 9212 9213 case DTRACEACT_SPECULATE: 9214 if (ecb->dte_size > sizeof (dtrace_epid_t)) 9215 return (EINVAL); 9216 9217 if (dp == NULL) 9218 return (EINVAL); 9219 9220 state->dts_speculates = 1; 9221 break; 9222 9223 case DTRACEACT_COMMIT: { 9224 dtrace_action_t *act = ecb->dte_action; 9225 9226 for (; act != NULL; act = act->dta_next) { 9227 if (act->dta_kind == DTRACEACT_COMMIT) 9228 return (EINVAL); 9229 } 9230 9231 if (dp == NULL) 9232 return (EINVAL); 9233 break; 9234 } 9235 9236 default: 9237 return (EINVAL); 9238 } 9239 9240 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 9241 /* 9242 * If this is a data-storing action or a speculate, 9243 * we must be sure that there isn't a commit on the 9244 * action chain. 9245 */ 9246 dtrace_action_t *act = ecb->dte_action; 9247 9248 for (; act != NULL; act = act->dta_next) { 9249 if (act->dta_kind == DTRACEACT_COMMIT) 9250 return (EINVAL); 9251 } 9252 } 9253 9254 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 9255 action->dta_rec.dtrd_size = size; 9256 } 9257 9258 action->dta_refcnt = 1; 9259 rec = &action->dta_rec; 9260 size = rec->dtrd_size; 9261 9262 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 9263 if (!(size & mask)) { 9264 align = mask + 1; 9265 break; 9266 } 9267 } 9268 9269 action->dta_kind = desc->dtad_kind; 9270 9271 if ((action->dta_difo = dp) != NULL) 9272 dtrace_difo_hold(dp); 9273 9274 rec->dtrd_action = action->dta_kind; 9275 rec->dtrd_arg = arg; 9276 rec->dtrd_uarg = desc->dtad_uarg; 9277 rec->dtrd_alignment = (uint16_t)align; 9278 rec->dtrd_format = format; 9279 9280 if ((last = ecb->dte_action_last) != NULL) { 9281 ASSERT(ecb->dte_action != NULL); 9282 action->dta_prev = last; 9283 last->dta_next = action; 9284 } else { 9285 ASSERT(ecb->dte_action == NULL); 9286 ecb->dte_action = action; 9287 } 9288 9289 ecb->dte_action_last = action; 9290 9291 return (0); 9292 } 9293 9294 static void 9295 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 9296 { 9297 dtrace_action_t *act = ecb->dte_action, *next; 9298 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 9299 dtrace_difo_t *dp; 9300 uint16_t format; 9301 9302 if (act != NULL && act->dta_refcnt > 1) { 9303 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 9304 act->dta_refcnt--; 9305 } else { 9306 for (; act != NULL; act = next) { 9307 next = act->dta_next; 9308 ASSERT(next != NULL || act == ecb->dte_action_last); 9309 ASSERT(act->dta_refcnt == 1); 9310 9311 if ((format = act->dta_rec.dtrd_format) != 0) 9312 dtrace_format_remove(ecb->dte_state, format); 9313 9314 if ((dp = act->dta_difo) != NULL) 9315 dtrace_difo_release(dp, vstate); 9316 9317 if (DTRACEACT_ISAGG(act->dta_kind)) { 9318 dtrace_ecb_aggregation_destroy(ecb, act); 9319 } else { 9320 kmem_free(act, sizeof (dtrace_action_t)); 9321 } 9322 } 9323 } 9324 9325 ecb->dte_action = NULL; 9326 ecb->dte_action_last = NULL; 9327 ecb->dte_size = sizeof (dtrace_epid_t); 9328 } 9329 9330 static void 9331 dtrace_ecb_disable(dtrace_ecb_t *ecb) 9332 { 9333 /* 9334 * We disable the ECB by 
removing it from its probe. 9335 */ 9336 dtrace_ecb_t *pecb, *prev = NULL; 9337 dtrace_probe_t *probe = ecb->dte_probe; 9338 9339 ASSERT(MUTEX_HELD(&dtrace_lock)); 9340 9341 if (probe == NULL) { 9342 /* 9343 * This is the NULL probe; there is nothing to disable. 9344 */ 9345 return; 9346 } 9347 9348 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9349 if (pecb == ecb) 9350 break; 9351 prev = pecb; 9352 } 9353 9354 ASSERT(pecb != NULL); 9355 9356 if (prev == NULL) { 9357 probe->dtpr_ecb = ecb->dte_next; 9358 } else { 9359 prev->dte_next = ecb->dte_next; 9360 } 9361 9362 if (ecb == probe->dtpr_ecb_last) { 9363 ASSERT(ecb->dte_next == NULL); 9364 probe->dtpr_ecb_last = prev; 9365 } 9366 9367 /* 9368 * The ECB has been disconnected from the probe; now sync to assure 9369 * that all CPUs have seen the change before returning. 9370 */ 9371 dtrace_sync(); 9372 9373 if (probe->dtpr_ecb == NULL) { 9374 /* 9375 * That was the last ECB on the probe; clear the predicate 9376 * cache ID for the probe, disable it and sync one more time 9377 * to assure that we'll never hit it again. 9378 */ 9379 dtrace_provider_t *prov = probe->dtpr_provider; 9380 9381 ASSERT(ecb->dte_next == NULL); 9382 ASSERT(probe->dtpr_ecb_last == NULL); 9383 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9384 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9385 probe->dtpr_id, probe->dtpr_arg); 9386 dtrace_sync(); 9387 } else { 9388 /* 9389 * There is at least one ECB remaining on the probe. If there 9390 * is _exactly_ one, set the probe's predicate cache ID to be 9391 * the predicate cache ID of the remaining ECB. 9392 */ 9393 ASSERT(probe->dtpr_ecb_last != NULL); 9394 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9395 9396 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9397 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9398 9399 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9400 9401 if (p != NULL) 9402 probe->dtpr_predcache = p->dtp_cacheid; 9403 } 9404 9405 ecb->dte_next = NULL; 9406 } 9407 } 9408 9409 static void 9410 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9411 { 9412 dtrace_state_t *state = ecb->dte_state; 9413 dtrace_vstate_t *vstate = &state->dts_vstate; 9414 dtrace_predicate_t *pred; 9415 dtrace_epid_t epid = ecb->dte_epid; 9416 9417 ASSERT(MUTEX_HELD(&dtrace_lock)); 9418 ASSERT(ecb->dte_next == NULL); 9419 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 9420 9421 if ((pred = ecb->dte_predicate) != NULL) 9422 dtrace_predicate_release(pred, vstate); 9423 9424 dtrace_ecb_action_remove(ecb); 9425 9426 ASSERT(state->dts_ecbs[epid - 1] == ecb); 9427 state->dts_ecbs[epid - 1] = NULL; 9428 9429 kmem_free(ecb, sizeof (dtrace_ecb_t)); 9430 } 9431 9432 static dtrace_ecb_t * 9433 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 9434 dtrace_enabling_t *enab) 9435 { 9436 dtrace_ecb_t *ecb; 9437 dtrace_predicate_t *pred; 9438 dtrace_actdesc_t *act; 9439 dtrace_provider_t *prov; 9440 dtrace_ecbdesc_t *desc = enab->dten_current; 9441 9442 ASSERT(MUTEX_HELD(&dtrace_lock)); 9443 ASSERT(state != NULL); 9444 9445 ecb = dtrace_ecb_add(state, probe); 9446 ecb->dte_uarg = desc->dted_uarg; 9447 9448 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 9449 dtrace_predicate_hold(pred); 9450 ecb->dte_predicate = pred; 9451 } 9452 9453 if (probe != NULL) { 9454 /* 9455 * If the provider shows more leg than the consumer is old 9456 * enough to see, we need to enable the appropriate implicit 9457 * predicate bits to prevent the ecb from activating at 9458 * revealing times. 
9459 * 9460 * Providers specifying DTRACE_PRIV_USER at register time 9461 * are stating that they need the /proc-style privilege 9462 * model to be enforced, and this is what DTRACE_COND_OWNER 9463 * and DTRACE_COND_ZONEOWNER will then do at probe time. 9464 */ 9465 prov = probe->dtpr_provider; 9466 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 9467 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9468 ecb->dte_cond |= DTRACE_COND_OWNER; 9469 9470 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 9471 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9472 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 9473 9474 /* 9475 * If the provider shows us kernel innards and the user 9476 * is lacking sufficient privilege, enable the 9477 * DTRACE_COND_USERMODE implicit predicate. 9478 */ 9479 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 9480 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 9481 ecb->dte_cond |= DTRACE_COND_USERMODE; 9482 } 9483 9484 if (dtrace_ecb_create_cache != NULL) { 9485 /* 9486 * If we have a cached ecb, we'll use its action list instead 9487 * of creating our own (saving both time and space). 9488 */ 9489 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 9490 dtrace_action_t *act = cached->dte_action; 9491 9492 if (act != NULL) { 9493 ASSERT(act->dta_refcnt > 0); 9494 act->dta_refcnt++; 9495 ecb->dte_action = act; 9496 ecb->dte_action_last = cached->dte_action_last; 9497 ecb->dte_needed = cached->dte_needed; 9498 ecb->dte_size = cached->dte_size; 9499 ecb->dte_alignment = cached->dte_alignment; 9500 } 9501 9502 return (ecb); 9503 } 9504 9505 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 9506 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 9507 dtrace_ecb_destroy(ecb); 9508 return (NULL); 9509 } 9510 } 9511 9512 dtrace_ecb_resize(ecb); 9513 9514 return (dtrace_ecb_create_cache = ecb); 9515 } 9516 9517 static int 9518 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 9519 { 9520 dtrace_ecb_t *ecb; 9521 dtrace_enabling_t *enab = arg; 9522 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 9523 9524 ASSERT(state != NULL); 9525 9526 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 9527 /* 9528 * This probe was created in a generation for which this 9529 * enabling has previously created ECBs; we don't want to 9530 * enable it again, so just kick out. 
9531 */ 9532 return (DTRACE_MATCH_NEXT); 9533 } 9534 9535 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 9536 return (DTRACE_MATCH_DONE); 9537 9538 dtrace_ecb_enable(ecb); 9539 return (DTRACE_MATCH_NEXT); 9540 } 9541 9542 static dtrace_ecb_t * 9543 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 9544 { 9545 dtrace_ecb_t *ecb; 9546 9547 ASSERT(MUTEX_HELD(&dtrace_lock)); 9548 9549 if (id == 0 || id > state->dts_necbs) 9550 return (NULL); 9551 9552 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 9553 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 9554 9555 return (state->dts_ecbs[id - 1]); 9556 } 9557 9558 static dtrace_aggregation_t * 9559 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 9560 { 9561 dtrace_aggregation_t *agg; 9562 9563 ASSERT(MUTEX_HELD(&dtrace_lock)); 9564 9565 if (id == 0 || id > state->dts_naggregations) 9566 return (NULL); 9567 9568 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 9569 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 9570 agg->dtag_id == id); 9571 9572 return (state->dts_aggregations[id - 1]); 9573 } 9574 9575 /* 9576 * DTrace Buffer Functions 9577 * 9578 * The following functions manipulate DTrace buffers. Most of these functions 9579 * are called in the context of establishing or processing consumer state; 9580 * exceptions are explicitly noted. 9581 */ 9582 9583 /* 9584 * Note: called from cross call context. This function switches the two 9585 * buffers on a given CPU. The atomicity of this operation is assured by 9586 * disabling interrupts while the actual switch takes place; the disabling of 9587 * interrupts serializes the execution with any execution of dtrace_probe() on 9588 * the same CPU. 9589 */ 9590 static void 9591 dtrace_buffer_switch(dtrace_buffer_t *buf) 9592 { 9593 caddr_t tomax = buf->dtb_tomax; 9594 caddr_t xamot = buf->dtb_xamot; 9595 dtrace_icookie_t cookie; 9596 9597 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9598 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 9599 9600 cookie = dtrace_interrupt_disable(); 9601 buf->dtb_tomax = xamot; 9602 buf->dtb_xamot = tomax; 9603 buf->dtb_xamot_drops = buf->dtb_drops; 9604 buf->dtb_xamot_offset = buf->dtb_offset; 9605 buf->dtb_xamot_errors = buf->dtb_errors; 9606 buf->dtb_xamot_flags = buf->dtb_flags; 9607 buf->dtb_offset = 0; 9608 buf->dtb_drops = 0; 9609 buf->dtb_errors = 0; 9610 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 9611 dtrace_interrupt_enable(cookie); 9612 } 9613 9614 /* 9615 * Note: called from cross call context. This function activates a buffer 9616 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 9617 * is guaranteed by the disabling of interrupts. 9618 */ 9619 static void 9620 dtrace_buffer_activate(dtrace_state_t *state) 9621 { 9622 dtrace_buffer_t *buf; 9623 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 9624 9625 buf = &state->dts_buffer[CPU->cpu_id]; 9626 9627 if (buf->dtb_tomax != NULL) { 9628 /* 9629 * We might like to assert that the buffer is marked inactive, 9630 * but this isn't necessarily true: the buffer for the CPU 9631 * that processes the BEGIN probe has its buffer activated 9632 * manually. In this case, we take the (harmless) action of 9633 * re-clearing the INACTIVE bit.
9634 */ 9635 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 9636 } 9637 9638 dtrace_interrupt_enable(cookie); 9639 } 9640 9641 static int 9642 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 9643 processorid_t cpu) 9644 { 9645 cpu_t *cp; 9646 dtrace_buffer_t *buf; 9647 9648 ASSERT(MUTEX_HELD(&cpu_lock)); 9649 ASSERT(MUTEX_HELD(&dtrace_lock)); 9650 9651 if (size > dtrace_nonroot_maxsize && 9652 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 9653 return (EFBIG); 9654 9655 cp = cpu_list; 9656 9657 do { 9658 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9659 continue; 9660 9661 buf = &bufs[cp->cpu_id]; 9662 9663 /* 9664 * If there is already a buffer allocated for this CPU, it 9665 * is only possible that this is a DR event. In this case, 9666 * the buffer size must match our specified size. 9667 */ 9668 if (buf->dtb_tomax != NULL) { 9669 ASSERT(buf->dtb_size == size); 9670 continue; 9671 } 9672 9673 ASSERT(buf->dtb_xamot == NULL); 9674 9675 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9676 goto err; 9677 9678 buf->dtb_size = size; 9679 buf->dtb_flags = flags; 9680 buf->dtb_offset = 0; 9681 buf->dtb_drops = 0; 9682 9683 if (flags & DTRACEBUF_NOSWITCH) 9684 continue; 9685 9686 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9687 goto err; 9688 } while ((cp = cp->cpu_next) != cpu_list); 9689 9690 return (0); 9691 9692 err: 9693 cp = cpu_list; 9694 9695 do { 9696 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9697 continue; 9698 9699 buf = &bufs[cp->cpu_id]; 9700 9701 if (buf->dtb_xamot != NULL) { 9702 ASSERT(buf->dtb_tomax != NULL); 9703 ASSERT(buf->dtb_size == size); 9704 kmem_free(buf->dtb_xamot, size); 9705 } 9706 9707 if (buf->dtb_tomax != NULL) { 9708 ASSERT(buf->dtb_size == size); 9709 kmem_free(buf->dtb_tomax, size); 9710 } 9711 9712 buf->dtb_tomax = NULL; 9713 buf->dtb_xamot = NULL; 9714 buf->dtb_size = 0; 9715 } while ((cp = cp->cpu_next) != cpu_list); 9716 9717 return (ENOMEM); 9718 } 9719 9720 /* 9721 * Note: called from probe context. This function just increments the drop 9722 * count on a buffer. It has been made a function to allow for the 9723 * possibility of understanding the source of mysterious drop counts. (A 9724 * problem for which one may be particularly disappointed that DTrace cannot 9725 * be used to understand DTrace.) 9726 */ 9727 static void 9728 dtrace_buffer_drop(dtrace_buffer_t *buf) 9729 { 9730 buf->dtb_drops++; 9731 } 9732 9733 /* 9734 * Note: called from probe context. This function is called to reserve space 9735 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9736 * mstate. Returns the new offset in the buffer, or a negative value if an 9737 * error has occurred. 9738 */ 9739 static intptr_t 9740 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9741 dtrace_state_t *state, dtrace_mstate_t *mstate) 9742 { 9743 intptr_t offs = buf->dtb_offset, soffs; 9744 intptr_t woffs; 9745 caddr_t tomax; 9746 size_t total; 9747 9748 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9749 return (-1); 9750 9751 if ((tomax = buf->dtb_tomax) == NULL) { 9752 dtrace_buffer_drop(buf); 9753 return (-1); 9754 } 9755 9756 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9757 while (offs & (align - 1)) { 9758 /* 9759 * Assert that our alignment is off by a number which 9760 * is itself sizeof (uint32_t) aligned. 
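 *
 * (For example: with 8-byte alignment and offs at 4, a single 4-byte
 * DTRACE_EPIDNONE word is stored as padding and offs advances to 8.)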
9761 */ 9762 ASSERT(!((align - (offs & (align - 1))) & 9763 (sizeof (uint32_t) - 1))); 9764 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9765 offs += sizeof (uint32_t); 9766 } 9767 9768 if ((soffs = offs + needed) > buf->dtb_size) { 9769 dtrace_buffer_drop(buf); 9770 return (-1); 9771 } 9772 9773 if (mstate == NULL) 9774 return (offs); 9775 9776 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 9777 mstate->dtms_scratch_size = buf->dtb_size - soffs; 9778 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9779 9780 return (offs); 9781 } 9782 9783 if (buf->dtb_flags & DTRACEBUF_FILL) { 9784 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 9785 (buf->dtb_flags & DTRACEBUF_FULL)) 9786 return (-1); 9787 goto out; 9788 } 9789 9790 total = needed + (offs & (align - 1)); 9791 9792 /* 9793 * For a ring buffer, life is quite a bit more complicated. Before 9794 * we can store any padding, we need to adjust our wrapping offset. 9795 * (If we've never before wrapped or we're not about to, no adjustment 9796 * is required.) 9797 */ 9798 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 9799 offs + total > buf->dtb_size) { 9800 woffs = buf->dtb_xamot_offset; 9801 9802 if (offs + total > buf->dtb_size) { 9803 /* 9804 * We can't fit in the end of the buffer. First, a 9805 * sanity check that we can fit in the buffer at all. 9806 */ 9807 if (total > buf->dtb_size) { 9808 dtrace_buffer_drop(buf); 9809 return (-1); 9810 } 9811 9812 /* 9813 * We're going to be storing at the top of the buffer, 9814 * so now we need to deal with the wrapped offset. We 9815 * only reset our wrapped offset to 0 if it is 9816 * currently greater than the current offset. If it 9817 * is less than the current offset, it is because a 9818 * previous allocation induced a wrap -- but the 9819 * allocation didn't subsequently take the space due 9820 * to an error or false predicate evaluation. In this 9821 * case, we'll just leave the wrapped offset alone: if 9822 * the wrapped offset hasn't been advanced far enough 9823 * for this allocation, it will be adjusted in the 9824 * lower loop. 9825 */ 9826 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 9827 if (woffs >= offs) 9828 woffs = 0; 9829 } else { 9830 woffs = 0; 9831 } 9832 9833 /* 9834 * Now we know that we're going to be storing to the 9835 * top of the buffer and that there is room for us 9836 * there. We need to clear the buffer from the current 9837 * offset to the end (there may be old gunk there). 9838 */ 9839 while (offs < buf->dtb_size) 9840 tomax[offs++] = 0; 9841 9842 /* 9843 * We need to set our offset to zero. And because we 9844 * are wrapping, we need to set the bit indicating as 9845 * much. We can also adjust our needed space back 9846 * down to the space required by the ECB -- we know 9847 * that the top of the buffer is aligned. 9848 */ 9849 offs = 0; 9850 total = needed; 9851 buf->dtb_flags |= DTRACEBUF_WRAPPED; 9852 } else { 9853 /* 9854 * There is room for us in the buffer, so we simply 9855 * need to check the wrapped offset. 9856 */ 9857 if (woffs < offs) { 9858 /* 9859 * The wrapped offset is less than the offset. 9860 * This can happen if we allocated buffer space 9861 * that induced a wrap, but then we didn't 9862 * subsequently take the space due to an error 9863 * or false predicate evaluation. This is 9864 * okay; we know that _this_ allocation isn't 9865 * going to induce a wrap. We still can't 9866 * reset the wrapped offset to be zero, 9867 * however: the space may have been trashed in 9868 * the previous failed probe attempt. 
But at 9869 * least the wrapped offset doesn't need to 9870 * be adjusted at all... 9871 */ 9872 goto out; 9873 } 9874 } 9875 9876 while (offs + total > woffs) { 9877 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 9878 size_t size; 9879 9880 if (epid == DTRACE_EPIDNONE) { 9881 size = sizeof (uint32_t); 9882 } else { 9883 ASSERT(epid <= state->dts_necbs); 9884 ASSERT(state->dts_ecbs[epid - 1] != NULL); 9885 9886 size = state->dts_ecbs[epid - 1]->dte_size; 9887 } 9888 9889 ASSERT(woffs + size <= buf->dtb_size); 9890 ASSERT(size != 0); 9891 9892 if (woffs + size == buf->dtb_size) { 9893 /* 9894 * We've reached the end of the buffer; we want 9895 * to set the wrapped offset to 0 and break 9896 * out. However, if the offs is 0, then we're 9897 * in a strange edge-condition: the amount of 9898 * space that we want to reserve plus the size 9899 * of the record that we're overwriting is 9900 * greater than the size of the buffer. This 9901 * is problematic because if we reserve the 9902 * space but subsequently don't consume it (due 9903 * to a failed predicate or error) the wrapped 9904 * offset will be 0 -- yet the EPID at offset 0 9905 * will not be committed. This situation is 9906 * relatively easy to deal with: if we're in 9907 * this case, the buffer is indistinguishable 9908 * from one that hasn't wrapped; we need only 9909 * finish the job by clearing the wrapped bit, 9910 * explicitly setting the offset to be 0, and 9911 * zero'ing out the old data in the buffer. 9912 */ 9913 if (offs == 0) { 9914 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9915 buf->dtb_offset = 0; 9916 woffs = total; 9917 9918 while (woffs < buf->dtb_size) 9919 tomax[woffs++] = 0; 9920 } 9921 9922 woffs = 0; 9923 break; 9924 } 9925 9926 woffs += size; 9927 } 9928 9929 /* 9930 * We have a wrapped offset. It may be that the wrapped offset 9931 * has become zero -- that's okay. 9932 */ 9933 buf->dtb_xamot_offset = woffs; 9934 } 9935 9936 out: 9937 /* 9938 * Now we can plow the buffer with any necessary padding. 9939 */ 9940 while (offs & (align - 1)) { 9941 /* 9942 * Assert that our alignment is off by a number which 9943 * is itself sizeof (uint32_t) aligned. 9944 */ 9945 ASSERT(!((align - (offs & (align - 1))) & 9946 (sizeof (uint32_t) - 1))); 9947 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9948 offs += sizeof (uint32_t); 9949 } 9950 9951 if (buf->dtb_flags & DTRACEBUF_FILL) { 9952 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9953 buf->dtb_flags |= DTRACEBUF_FULL; 9954 return (-1); 9955 } 9956 } 9957 9958 if (mstate == NULL) 9959 return (offs); 9960 9961 /* 9962 * For ring buffers and fill buffers, the scratch space is always 9963 * the inactive buffer. 9964 */ 9965 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9966 mstate->dtms_scratch_size = buf->dtb_size; 9967 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9968 9969 return (offs); 9970 } 9971 9972 static void 9973 dtrace_buffer_polish(dtrace_buffer_t *buf) 9974 { 9975 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9976 ASSERT(MUTEX_HELD(&dtrace_lock)); 9977 9978 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9979 return; 9980 9981 /* 9982 * We need to polish the ring buffer. There are three cases: 9983 * 9984 * - The first (and presumably most common) is that there is no gap 9985 * between the buffer offset and the wrapped offset. In this case, 9986 * there is nothing in the buffer that isn't valid data; we can 9987 * mark the buffer as polished and return. 
9988 * 9989 * - The second (less common than the first but still more common 9990 * than the third) is that there is a gap between the buffer offset 9991 * and the wrapped offset, and the wrapped offset is larger than the 9992 * buffer offset. This can happen because of an alignment issue, or 9993 * can happen because of a call to dtrace_buffer_reserve() that 9994 * didn't subsequently consume the buffer space. In this case, 9995 * we need to zero the data from the buffer offset to the wrapped 9996 * offset. 9997 * 9998 * - The third (and least common) is that there is a gap between the 9999 * buffer offset and the wrapped offset, but the wrapped offset is 10000 * _less_ than the buffer offset. This can only happen because a 10001 * call to dtrace_buffer_reserve() induced a wrap, but the space 10002 * was not subsequently consumed. In this case, we need to zero the 10003 * space from the offset to the end of the buffer _and_ from the 10004 * top of the buffer to the wrapped offset. 10005 */ 10006 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10007 bzero(buf->dtb_tomax + buf->dtb_offset, 10008 buf->dtb_xamot_offset - buf->dtb_offset); 10009 } 10010 10011 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10012 bzero(buf->dtb_tomax + buf->dtb_offset, 10013 buf->dtb_size - buf->dtb_offset); 10014 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10015 } 10016 } 10017 10018 static void 10019 dtrace_buffer_free(dtrace_buffer_t *bufs) 10020 { 10021 int i; 10022 10023 for (i = 0; i < NCPU; i++) { 10024 dtrace_buffer_t *buf = &bufs[i]; 10025 10026 if (buf->dtb_tomax == NULL) { 10027 ASSERT(buf->dtb_xamot == NULL); 10028 ASSERT(buf->dtb_size == 0); 10029 continue; 10030 } 10031 10032 if (buf->dtb_xamot != NULL) { 10033 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10034 kmem_free(buf->dtb_xamot, buf->dtb_size); 10035 } 10036 10037 kmem_free(buf->dtb_tomax, buf->dtb_size); 10038 buf->dtb_size = 0; 10039 buf->dtb_tomax = NULL; 10040 buf->dtb_xamot = NULL; 10041 } 10042 } 10043 10044 /* 10045 * DTrace Enabling Functions 10046 */ 10047 static dtrace_enabling_t * 10048 dtrace_enabling_create(dtrace_vstate_t *vstate) 10049 { 10050 dtrace_enabling_t *enab; 10051 10052 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10053 enab->dten_vstate = vstate; 10054 10055 return (enab); 10056 } 10057 10058 static void 10059 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10060 { 10061 dtrace_ecbdesc_t **ndesc; 10062 size_t osize, nsize; 10063 10064 /* 10065 * We can't add to enablings after we've enabled them, or after we've 10066 * retained them. 
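 *
 * (When the description array below is full, it grows by doubling --
 * dten_maxdesc goes 1, 2, 4, ... -- with the old entries bcopy()'d
 * into the new allocation.)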
10067 */ 10068 ASSERT(enab->dten_probegen == 0); 10069 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10070 10071 if (enab->dten_ndesc < enab->dten_maxdesc) { 10072 enab->dten_desc[enab->dten_ndesc++] = ecb; 10073 return; 10074 } 10075 10076 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10077 10078 if (enab->dten_maxdesc == 0) { 10079 enab->dten_maxdesc = 1; 10080 } else { 10081 enab->dten_maxdesc <<= 1; 10082 } 10083 10084 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 10085 10086 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10087 ndesc = kmem_zalloc(nsize, KM_SLEEP); 10088 bcopy(enab->dten_desc, ndesc, osize); 10089 kmem_free(enab->dten_desc, osize); 10090 10091 enab->dten_desc = ndesc; 10092 enab->dten_desc[enab->dten_ndesc++] = ecb; 10093 } 10094 10095 static void 10096 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 10097 dtrace_probedesc_t *pd) 10098 { 10099 dtrace_ecbdesc_t *new; 10100 dtrace_predicate_t *pred; 10101 dtrace_actdesc_t *act; 10102 10103 /* 10104 * We're going to create a new ECB description that matches the 10105 * specified ECB in every way, but has the specified probe description. 10106 */ 10107 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10108 10109 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 10110 dtrace_predicate_hold(pred); 10111 10112 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 10113 dtrace_actdesc_hold(act); 10114 10115 new->dted_action = ecb->dted_action; 10116 new->dted_pred = ecb->dted_pred; 10117 new->dted_probe = *pd; 10118 new->dted_uarg = ecb->dted_uarg; 10119 10120 dtrace_enabling_add(enab, new); 10121 } 10122 10123 static void 10124 dtrace_enabling_dump(dtrace_enabling_t *enab) 10125 { 10126 int i; 10127 10128 for (i = 0; i < enab->dten_ndesc; i++) { 10129 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 10130 10131 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 10132 desc->dtpd_provider, desc->dtpd_mod, 10133 desc->dtpd_func, desc->dtpd_name); 10134 } 10135 } 10136 10137 static void 10138 dtrace_enabling_destroy(dtrace_enabling_t *enab) 10139 { 10140 int i; 10141 dtrace_ecbdesc_t *ep; 10142 dtrace_vstate_t *vstate = enab->dten_vstate; 10143 10144 ASSERT(MUTEX_HELD(&dtrace_lock)); 10145 10146 for (i = 0; i < enab->dten_ndesc; i++) { 10147 dtrace_actdesc_t *act, *next; 10148 dtrace_predicate_t *pred; 10149 10150 ep = enab->dten_desc[i]; 10151 10152 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 10153 dtrace_predicate_release(pred, vstate); 10154 10155 for (act = ep->dted_action; act != NULL; act = next) { 10156 next = act->dtad_next; 10157 dtrace_actdesc_release(act, vstate); 10158 } 10159 10160 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10161 } 10162 10163 kmem_free(enab->dten_desc, 10164 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 10165 10166 /* 10167 * If this was a retained enabling, decrement the dts_nretained count 10168 * and take it off of the dtrace_retained list. 
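 *
 * (An enabling is on the retained list precisely when it has a non-NULL
 * neighbor or is the list head itself; the unlinking below treats the
 * head and interior cases separately.)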
10169 */ 10170 if (enab->dten_prev != NULL || enab->dten_next != NULL || 10171 dtrace_retained == enab) { 10172 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10173 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 10174 enab->dten_vstate->dtvs_state->dts_nretained--; 10175 } 10176 10177 if (enab->dten_prev == NULL) { 10178 if (dtrace_retained == enab) { 10179 dtrace_retained = enab->dten_next; 10180 10181 if (dtrace_retained != NULL) 10182 dtrace_retained->dten_prev = NULL; 10183 } 10184 } else { 10185 ASSERT(enab != dtrace_retained); 10186 ASSERT(dtrace_retained != NULL); 10187 enab->dten_prev->dten_next = enab->dten_next; 10188 } 10189 10190 if (enab->dten_next != NULL) { 10191 ASSERT(dtrace_retained != NULL); 10192 enab->dten_next->dten_prev = enab->dten_prev; 10193 } 10194 10195 kmem_free(enab, sizeof (dtrace_enabling_t)); 10196 } 10197 10198 static int 10199 dtrace_enabling_retain(dtrace_enabling_t *enab) 10200 { 10201 dtrace_state_t *state; 10202 10203 ASSERT(MUTEX_HELD(&dtrace_lock)); 10204 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10205 ASSERT(enab->dten_vstate != NULL); 10206 10207 state = enab->dten_vstate->dtvs_state; 10208 ASSERT(state != NULL); 10209 10210 /* 10211 * We only allow each state to retain dtrace_retain_max enablings. 10212 */ 10213 if (state->dts_nretained >= dtrace_retain_max) 10214 return (ENOSPC); 10215 10216 state->dts_nretained++; 10217 10218 if (dtrace_retained == NULL) { 10219 dtrace_retained = enab; 10220 return (0); 10221 } 10222 10223 enab->dten_next = dtrace_retained; 10224 dtrace_retained->dten_prev = enab; 10225 dtrace_retained = enab; 10226 10227 return (0); 10228 } 10229 10230 static int 10231 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 10232 dtrace_probedesc_t *create) 10233 { 10234 dtrace_enabling_t *new, *enab; 10235 int found = 0, err = ENOENT; 10236 10237 ASSERT(MUTEX_HELD(&dtrace_lock)); 10238 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 10239 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 10240 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 10241 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 10242 10243 new = dtrace_enabling_create(&state->dts_vstate); 10244 10245 /* 10246 * Iterate over all retained enablings, looking for enablings that 10247 * match the specified state. 10248 */ 10249 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10250 int i; 10251 10252 /* 10253 * dtvs_state can only be NULL for helper enablings -- and 10254 * helper enablings can't be retained. 10255 */ 10256 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10257 10258 if (enab->dten_vstate->dtvs_state != state) 10259 continue; 10260 10261 /* 10262 * Now iterate over each probe description; we're looking for 10263 * an exact match to the specified probe description. 10264 */ 10265 for (i = 0; i < enab->dten_ndesc; i++) { 10266 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10267 dtrace_probedesc_t *pd = &ep->dted_probe; 10268 10269 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 10270 continue; 10271 10272 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 10273 continue; 10274 10275 if (strcmp(pd->dtpd_func, match->dtpd_func)) 10276 continue; 10277 10278 if (strcmp(pd->dtpd_name, match->dtpd_name)) 10279 continue; 10280 10281 /* 10282 * We have a winning probe! Add it to our growing 10283 * enabling. 
10284 */ 10285 found = 1; 10286 dtrace_enabling_addlike(new, ep, create); 10287 } 10288 } 10289 10290 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 10291 dtrace_enabling_destroy(new); 10292 return (err); 10293 } 10294 10295 return (0); 10296 } 10297 10298 static void 10299 dtrace_enabling_retract(dtrace_state_t *state) 10300 { 10301 dtrace_enabling_t *enab, *next; 10302 10303 ASSERT(MUTEX_HELD(&dtrace_lock)); 10304 10305 /* 10306 * Iterate over all retained enablings, destroying the enablings 10307 * retained for the specified state. 10308 */ 10309 for (enab = dtrace_retained; enab != NULL; enab = next) { 10310 next = enab->dten_next; 10311 10312 /* 10313 * dtvs_state can only be NULL for helper enablings -- and 10314 * helper enablings can't be retained. 10315 */ 10316 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10317 10318 if (enab->dten_vstate->dtvs_state == state) { 10319 ASSERT(state->dts_nretained > 0); 10320 dtrace_enabling_destroy(enab); 10321 } 10322 } 10323 10324 ASSERT(state->dts_nretained == 0); 10325 } 10326 10327 static int 10328 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 10329 { 10330 int i = 0; 10331 int matched = 0; 10332 10333 ASSERT(MUTEX_HELD(&cpu_lock)); 10334 ASSERT(MUTEX_HELD(&dtrace_lock)); 10335 10336 for (i = 0; i < enab->dten_ndesc; i++) { 10337 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10338 10339 enab->dten_current = ep; 10340 enab->dten_error = 0; 10341 10342 matched += dtrace_probe_enable(&ep->dted_probe, enab); 10343 10344 if (enab->dten_error != 0) { 10345 /* 10346 * If we get an error half-way through enabling the 10347 * probes, we kick out -- perhaps with some number of 10348 * them enabled. Leaving enabled probes enabled may 10349 * be slightly confusing for user-level, but we expect 10350 * that no one will attempt to actually drive on in 10351 * the face of such errors. If this is an anonymous 10352 * enabling (indicated with a NULL nmatched pointer), 10353 * we cmn_err() a message. We aren't expecting to 10354 * get such an error -- to the extent that it can exist 10355 * at all, it would be a result of corrupted DOF in the 10356 * driver properties. 10357 */ 10358 if (nmatched == NULL) { 10359 cmn_err(CE_WARN, "dtrace_enabling_match() " 10360 "error on %p: %d", (void *)ep, 10361 enab->dten_error); 10362 } 10363 10364 return (enab->dten_error); 10365 } 10366 } 10367 10368 enab->dten_probegen = dtrace_probegen; 10369 if (nmatched != NULL) 10370 *nmatched = matched; 10371 10372 return (0); 10373 } 10374 10375 static void 10376 dtrace_enabling_matchall(void) 10377 { 10378 dtrace_enabling_t *enab; 10379 10380 mutex_enter(&cpu_lock); 10381 mutex_enter(&dtrace_lock); 10382 10383 /* 10384 * Because we can be called after dtrace_detach() has been called, we 10385 * cannot assert that there are retained enablings. We can safely 10386 * load from dtrace_retained, however: the taskq_destroy() at the 10387 * end of dtrace_detach() will block pending our completion.
10388 */ 10389 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 10390 (void) dtrace_enabling_match(enab, NULL); 10391 10392 mutex_exit(&dtrace_lock); 10393 mutex_exit(&cpu_lock); 10394 } 10395 10396 static int 10397 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 10398 { 10399 dtrace_enabling_t *enab; 10400 int matched, total = 0, err; 10401 10402 ASSERT(MUTEX_HELD(&cpu_lock)); 10403 ASSERT(MUTEX_HELD(&dtrace_lock)); 10404 10405 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10406 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10407 10408 if (enab->dten_vstate->dtvs_state != state) 10409 continue; 10410 10411 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 10412 return (err); 10413 10414 total += matched; 10415 } 10416 10417 if (nmatched != NULL) 10418 *nmatched = total; 10419 10420 return (0); 10421 } 10422 10423 /* 10424 * If an enabling is to be enabled without having matched probes (that is, if 10425 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 10426 * enabling must be _primed_ by creating an ECB for every ECB description. 10427 * This must be done to assure that we know the number of speculations, the 10428 * number of aggregations, the minimum buffer size needed, etc. before we 10429 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 10430 * enabling any probes, we create ECBs for every ECB description, but with a 10431 * NULL probe -- which is exactly what this function does. 10432 */ 10433 static void 10434 dtrace_enabling_prime(dtrace_state_t *state) 10435 { 10436 dtrace_enabling_t *enab; 10437 int i; 10438 10439 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10440 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10441 10442 if (enab->dten_vstate->dtvs_state != state) 10443 continue; 10444 10445 /* 10446 * We don't want to prime an enabling more than once, lest 10447 * we allow a malicious user to induce resource exhaustion. 10448 * (The ECBs that result from priming an enabling aren't 10449 * leaked -- but they also aren't deallocated until the 10450 * consumer state is destroyed.) 10451 */ 10452 if (enab->dten_primed) 10453 continue; 10454 10455 for (i = 0; i < enab->dten_ndesc; i++) { 10456 enab->dten_current = enab->dten_desc[i]; 10457 (void) dtrace_probe_enable(NULL, enab); 10458 } 10459 10460 enab->dten_primed = 1; 10461 } 10462 } 10463 10464 /* 10465 * Called to indicate that probes should be provided due to retained 10466 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 10467 * must take an initial lap through the enabling, calling the dtps_provide() 10468 * entry point explicitly to allow for autocreated probes.
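 * For example (purely illustrative): when a consumer retains an enabling
 * for a probe that does not yet exist -- say, one that a provider such as
 * pid would create on demand -- it is this explicit dtps_provide() lap
 * that hands the provider the retained probe description, allowing it to
 * autocreate a matching probe before the framework attempts the match.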
10469 */ 10470 static void 10471 dtrace_enabling_provide(dtrace_provider_t *prv) 10472 { 10473 int i, all = 0; 10474 dtrace_probedesc_t desc; 10475 10476 ASSERT(MUTEX_HELD(&dtrace_lock)); 10477 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 10478 10479 if (prv == NULL) { 10480 all = 1; 10481 prv = dtrace_provider; 10482 } 10483 10484 do { 10485 dtrace_enabling_t *enab = dtrace_retained; 10486 void *parg = prv->dtpv_arg; 10487 10488 for (; enab != NULL; enab = enab->dten_next) { 10489 for (i = 0; i < enab->dten_ndesc; i++) { 10490 desc = enab->dten_desc[i]->dted_probe; 10491 mutex_exit(&dtrace_lock); 10492 prv->dtpv_pops.dtps_provide(parg, &desc); 10493 mutex_enter(&dtrace_lock); 10494 } 10495 } 10496 } while (all && (prv = prv->dtpv_next) != NULL); 10497 10498 mutex_exit(&dtrace_lock); 10499 dtrace_probe_provide(NULL, all ? NULL : prv); 10500 mutex_enter(&dtrace_lock); 10501 } 10502 10503 /* 10504 * DTrace DOF Functions 10505 */ 10506 /*ARGSUSED*/ 10507 static void 10508 dtrace_dof_error(dof_hdr_t *dof, const char *str) 10509 { 10510 if (dtrace_err_verbose) 10511 cmn_err(CE_WARN, "failed to process DOF: %s", str); 10512 10513 #ifdef DTRACE_ERRDEBUG 10514 dtrace_errdebug(str); 10515 #endif 10516 } 10517 10518 /* 10519 * Create DOF out of a currently enabled state. Right now, we only create 10520 * DOF containing the run-time options -- but this could be expanded to create 10521 * complete DOF representing the enabled state. 10522 */ 10523 static dof_hdr_t * 10524 dtrace_dof_create(dtrace_state_t *state) 10525 { 10526 dof_hdr_t *dof; 10527 dof_sec_t *sec; 10528 dof_optdesc_t *opt; 10529 int i, len = sizeof (dof_hdr_t) + 10530 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 10531 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10532 10533 ASSERT(MUTEX_HELD(&dtrace_lock)); 10534 10535 dof = kmem_zalloc(len, KM_SLEEP); 10536 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 10537 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 10538 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 10539 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 10540 10541 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 10542 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 10543 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 10544 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 10545 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 10546 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 10547 10548 dof->dofh_flags = 0; 10549 dof->dofh_hdrsize = sizeof (dof_hdr_t); 10550 dof->dofh_secsize = sizeof (dof_sec_t); 10551 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 10552 dof->dofh_secoff = sizeof (dof_hdr_t); 10553 dof->dofh_loadsz = len; 10554 dof->dofh_filesz = len; 10555 dof->dofh_pad = 0; 10556 10557 /* 10558 * Fill in the option section header... 
10559 */ 10560 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 10561 sec->dofs_type = DOF_SECT_OPTDESC; 10562 sec->dofs_align = sizeof (uint64_t); 10563 sec->dofs_flags = DOF_SECF_LOAD; 10564 sec->dofs_entsize = sizeof (dof_optdesc_t); 10565 10566 opt = (dof_optdesc_t *)((uintptr_t)sec + 10567 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 10568 10569 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 10570 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10571 10572 for (i = 0; i < DTRACEOPT_MAX; i++) { 10573 opt[i].dofo_option = i; 10574 opt[i].dofo_strtab = DOF_SECIDX_NONE; 10575 opt[i].dofo_value = state->dts_options[i]; 10576 } 10577 10578 return (dof); 10579 } 10580 10581 static dof_hdr_t * 10582 dtrace_dof_copyin(uintptr_t uarg, int *errp) 10583 { 10584 dof_hdr_t hdr, *dof; 10585 10586 ASSERT(!MUTEX_HELD(&dtrace_lock)); 10587 10588 /* 10589 * First, we're going to copyin() the sizeof (dof_hdr_t). 10590 */ 10591 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 10592 dtrace_dof_error(NULL, "failed to copyin DOF header"); 10593 *errp = EFAULT; 10594 return (NULL); 10595 } 10596 10597 /* 10598 * Now we'll allocate the entire DOF and copy it in -- provided 10599 * that the length isn't outrageous. 10600 */ 10601 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 10602 dtrace_dof_error(&hdr, "load size exceeds maximum"); 10603 *errp = E2BIG; 10604 return (NULL); 10605 } 10606 10607 if (hdr.dofh_loadsz < sizeof (hdr)) { 10608 dtrace_dof_error(&hdr, "invalid load size"); 10609 *errp = EINVAL; 10610 return (NULL); 10611 } 10612 10613 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 10614 10615 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 10616 kmem_free(dof, hdr.dofh_loadsz); 10617 *errp = EFAULT; 10618 return (NULL); 10619 } 10620 10621 return (dof); 10622 } 10623 10624 static dof_hdr_t * 10625 dtrace_dof_property(const char *name) 10626 { 10627 uchar_t *buf; 10628 uint64_t loadsz; 10629 unsigned int len, i; 10630 dof_hdr_t *dof; 10631 10632 /* 10633 * Unfortunately, arrays of values in .conf files are always (and 10634 * only) interpreted to be integer arrays. We must read our DOF 10635 * as an integer array, and then squeeze it into a byte array. 10636 */ 10637 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 10638 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 10639 return (NULL); 10640 10641 for (i = 0; i < len; i++) 10642 buf[i] = (uchar_t)(((int *)buf)[i]); 10643 10644 if (len < sizeof (dof_hdr_t)) { 10645 ddi_prop_free(buf); 10646 dtrace_dof_error(NULL, "truncated header"); 10647 return (NULL); 10648 } 10649 10650 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 10651 ddi_prop_free(buf); 10652 dtrace_dof_error(NULL, "truncated DOF"); 10653 return (NULL); 10654 } 10655 10656 if (loadsz >= dtrace_dof_maxsize) { 10657 ddi_prop_free(buf); 10658 dtrace_dof_error(NULL, "oversized DOF"); 10659 return (NULL); 10660 } 10661 10662 dof = kmem_alloc(loadsz, KM_SLEEP); 10663 bcopy(buf, dof, loadsz); 10664 ddi_prop_free(buf); 10665 10666 return (dof); 10667 } 10668 10669 static void 10670 dtrace_dof_destroy(dof_hdr_t *dof) 10671 { 10672 kmem_free(dof, dof->dofh_loadsz); 10673 } 10674 10675 /* 10676 * Return the dof_sec_t pointer corresponding to a given section index. If the 10677 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 10678 * a type other than DOF_SECT_NONE is specified, the header is checked against 10679 * this type and NULL is returned if the types do not match.
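 *
 * A typical use appears in dtrace_dof_probedesc(), below, which resolves
 * a probe's string table with a call of the form:
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
 *
 * and simply propagates the NULL if the lookup fails.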
10680 */ 10681 static dof_sec_t * 10682 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 10683 { 10684 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 10685 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 10686 10687 if (i >= dof->dofh_secnum) { 10688 dtrace_dof_error(dof, "referenced section index is invalid"); 10689 return (NULL); 10690 } 10691 10692 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 10693 dtrace_dof_error(dof, "referenced section is not loadable"); 10694 return (NULL); 10695 } 10696 10697 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 10698 dtrace_dof_error(dof, "referenced section is the wrong type"); 10699 return (NULL); 10700 } 10701 10702 return (sec); 10703 } 10704 10705 static dtrace_probedesc_t * 10706 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 10707 { 10708 dof_probedesc_t *probe; 10709 dof_sec_t *strtab; 10710 uintptr_t daddr = (uintptr_t)dof; 10711 uintptr_t str; 10712 size_t size; 10713 10714 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 10715 dtrace_dof_error(dof, "invalid probe section"); 10716 return (NULL); 10717 } 10718 10719 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10720 dtrace_dof_error(dof, "bad alignment in probe description"); 10721 return (NULL); 10722 } 10723 10724 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10725 dtrace_dof_error(dof, "truncated probe description"); 10726 return (NULL); 10727 } 10728 10729 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10730 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10731 10732 if (strtab == NULL) 10733 return (NULL); 10734 10735 str = daddr + strtab->dofs_offset; 10736 size = strtab->dofs_size; 10737 10738 if (probe->dofp_provider >= strtab->dofs_size) { 10739 dtrace_dof_error(dof, "corrupt probe provider"); 10740 return (NULL); 10741 } 10742 10743 (void) strncpy(desc->dtpd_provider, 10744 (char *)(str + probe->dofp_provider), 10745 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10746 10747 if (probe->dofp_mod >= strtab->dofs_size) { 10748 dtrace_dof_error(dof, "corrupt probe module"); 10749 return (NULL); 10750 } 10751 10752 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10753 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10754 10755 if (probe->dofp_func >= strtab->dofs_size) { 10756 dtrace_dof_error(dof, "corrupt probe function"); 10757 return (NULL); 10758 } 10759 10760 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10761 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10762 10763 if (probe->dofp_name >= strtab->dofs_size) { 10764 dtrace_dof_error(dof, "corrupt probe name"); 10765 return (NULL); 10766 } 10767 10768 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10769 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10770 10771 return (desc); 10772 } 10773 10774 static dtrace_difo_t * 10775 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10776 cred_t *cr) 10777 { 10778 dtrace_difo_t *dp; 10779 size_t ttl = 0; 10780 dof_difohdr_t *dofd; 10781 uintptr_t daddr = (uintptr_t)dof; 10782 size_t max = dtrace_difo_maxsize; 10783 int i, l, n; 10784 10785 static const struct { 10786 int section; 10787 int bufoffs; 10788 int lenoffs; 10789 int entsize; 10790 int align; 10791 const char *msg; 10792 } difo[] = { 10793 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 10794 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 10795 sizeof (dif_instr_t), "multiple DIF sections" }, 
10796 10797 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 10798 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 10799 sizeof (uint64_t), "multiple integer tables" }, 10800 10801 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 10802 offsetof(dtrace_difo_t, dtdo_strlen), 0, 10803 sizeof (char), "multiple string tables" }, 10804 10805 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 10806 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 10807 sizeof (uint_t), "multiple variable tables" }, 10808 10809 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 10810 }; 10811 10812 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 10813 dtrace_dof_error(dof, "invalid DIFO header section"); 10814 return (NULL); 10815 } 10816 10817 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10818 dtrace_dof_error(dof, "bad alignment in DIFO header"); 10819 return (NULL); 10820 } 10821 10822 if (sec->dofs_size < sizeof (dof_difohdr_t) || 10823 sec->dofs_size % sizeof (dof_secidx_t)) { 10824 dtrace_dof_error(dof, "bad size in DIFO header"); 10825 return (NULL); 10826 } 10827 10828 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10829 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 10830 10831 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10832 dp->dtdo_rtype = dofd->dofd_rtype; 10833 10834 for (l = 0; l < n; l++) { 10835 dof_sec_t *subsec; 10836 void **bufp; 10837 uint32_t *lenp; 10838 10839 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 10840 dofd->dofd_links[l])) == NULL) 10841 goto err; /* invalid section link */ 10842 10843 if (ttl + subsec->dofs_size > max) { 10844 dtrace_dof_error(dof, "exceeds maximum size"); 10845 goto err; 10846 } 10847 10848 ttl += subsec->dofs_size; 10849 10850 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 10851 if (subsec->dofs_type != difo[i].section) 10852 continue; 10853 10854 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 10855 dtrace_dof_error(dof, "section not loaded"); 10856 goto err; 10857 } 10858 10859 if (subsec->dofs_align != difo[i].align) { 10860 dtrace_dof_error(dof, "bad alignment"); 10861 goto err; 10862 } 10863 10864 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 10865 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 10866 10867 if (*bufp != NULL) { 10868 dtrace_dof_error(dof, difo[i].msg); 10869 goto err; 10870 } 10871 10872 if (difo[i].entsize != subsec->dofs_entsize) { 10873 dtrace_dof_error(dof, "entry size mismatch"); 10874 goto err; 10875 } 10876 10877 if (subsec->dofs_entsize != 0 && 10878 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 10879 dtrace_dof_error(dof, "corrupt entry size"); 10880 goto err; 10881 } 10882 10883 *lenp = subsec->dofs_size; 10884 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 10885 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 10886 *bufp, subsec->dofs_size); 10887 10888 if (subsec->dofs_entsize != 0) 10889 *lenp /= subsec->dofs_entsize; 10890 10891 break; 10892 } 10893 10894 /* 10895 * If we encounter a loadable DIFO sub-section that is not 10896 * known to us, assume this is a broken program and fail. 10897 */ 10898 if (difo[i].section == DOF_SECT_NONE && 10899 (subsec->dofs_flags & DOF_SECF_LOAD)) { 10900 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 10901 goto err; 10902 } 10903 } 10904 10905 if (dp->dtdo_buf == NULL) { 10906 /* 10907 * We can't have a DIF object without DIF text.
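 * (Text alone does not suffice, of course: dtrace_difo_validate(),
 * called below, further checks the text itself -- among other things,
 * that the final instruction is a ret.)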
10908 */ 10909 dtrace_dof_error(dof, "missing DIF text"); 10910 goto err; 10911 } 10912 10913 /* 10914 * Before we validate the DIF object, run through the variable table 10915 * looking for the strings -- if any of their sizes are zero, we'll set 10916 * their size to be the system-wide default string size. Note that 10917 * this should _not_ happen if the "strsize" option has been set -- 10918 * in this case, the compiler should have set the size to reflect the 10919 * setting of the option. 10920 */ 10921 for (i = 0; i < dp->dtdo_varlen; i++) { 10922 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10923 dtrace_diftype_t *t = &v->dtdv_type; 10924 10925 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10926 continue; 10927 10928 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10929 t->dtdt_size = dtrace_strsize_default; 10930 } 10931 10932 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10933 goto err; 10934 10935 dtrace_difo_init(dp, vstate); 10936 return (dp); 10937 10938 err: 10939 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10940 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10941 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10942 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10943 10944 kmem_free(dp, sizeof (dtrace_difo_t)); 10945 return (NULL); 10946 } 10947 10948 static dtrace_predicate_t * 10949 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10950 cred_t *cr) 10951 { 10952 dtrace_difo_t *dp; 10953 10954 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10955 return (NULL); 10956 10957 return (dtrace_predicate_create(dp)); 10958 } 10959 10960 static dtrace_actdesc_t * 10961 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10962 cred_t *cr) 10963 { 10964 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10965 dof_actdesc_t *desc; 10966 dof_sec_t *difosec; 10967 size_t offs; 10968 uintptr_t daddr = (uintptr_t)dof; 10969 uint64_t arg; 10970 dtrace_actkind_t kind; 10971 10972 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10973 dtrace_dof_error(dof, "invalid action section"); 10974 return (NULL); 10975 } 10976 10977 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10978 dtrace_dof_error(dof, "truncated action description"); 10979 return (NULL); 10980 } 10981 10982 if (sec->dofs_align != sizeof (uint64_t)) { 10983 dtrace_dof_error(dof, "bad alignment in action description"); 10984 return (NULL); 10985 } 10986 10987 if (sec->dofs_size < sec->dofs_entsize) { 10988 dtrace_dof_error(dof, "section entry size exceeds total size"); 10989 return (NULL); 10990 } 10991 10992 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 10993 dtrace_dof_error(dof, "bad entry size in action description"); 10994 return (NULL); 10995 } 10996 10997 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10998 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10999 return (NULL); 11000 } 11001 11002 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 11003 desc = (dof_actdesc_t *)(daddr + 11004 (uintptr_t)sec->dofs_offset + offs); 11005 kind = (dtrace_actkind_t)desc->dofa_kind; 11006 11007 if (DTRACEACT_ISPRINTFLIKE(kind) && 11008 (kind != DTRACEACT_PRINTA || 11009 desc->dofa_strtab != DOF_SECIDX_NONE)) { 11010 dof_sec_t *strtab; 11011 char *str, *fmt; 11012 uint64_t i; 11013 11014 /* 11015 * printf()-like actions must have a format string.
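 * The format is referenced by its offset (dofa_arg) into the
 * specified string table; below, we verify that the string is
 * NUL-terminated within the table and non-empty before copying
 * it for the action.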
11016 */ 11017 if ((strtab = dtrace_dof_sect(dof, 11018 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 11019 goto err; 11020 11021 str = (char *)((uintptr_t)dof + 11022 (uintptr_t)strtab->dofs_offset); 11023 11024 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 11025 if (str[i] == '\0') 11026 break; 11027 } 11028 11029 if (i >= strtab->dofs_size) { 11030 dtrace_dof_error(dof, "bogus format string"); 11031 goto err; 11032 } 11033 11034 if (i == desc->dofa_arg) { 11035 dtrace_dof_error(dof, "empty format string"); 11036 goto err; 11037 } 11038 11039 i -= desc->dofa_arg; 11040 fmt = kmem_alloc(i + 1, KM_SLEEP); 11041 bcopy(&str[desc->dofa_arg], fmt, i + 1); 11042 arg = (uint64_t)(uintptr_t)fmt; 11043 } else { 11044 if (kind == DTRACEACT_PRINTA) { 11045 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 11046 arg = 0; 11047 } else { 11048 arg = desc->dofa_arg; 11049 } 11050 } 11051 11052 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 11053 desc->dofa_uarg, arg); 11054 11055 if (last != NULL) { 11056 last->dtad_next = act; 11057 } else { 11058 first = act; 11059 } 11060 11061 last = act; 11062 11063 if (desc->dofa_difo == DOF_SECIDX_NONE) 11064 continue; 11065 11066 if ((difosec = dtrace_dof_sect(dof, 11067 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 11068 goto err; 11069 11070 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 11071 11072 if (act->dtad_difo == NULL) 11073 goto err; 11074 } 11075 11076 ASSERT(first != NULL); 11077 return (first); 11078 11079 err: 11080 for (act = first; act != NULL; act = next) { 11081 next = act->dtad_next; 11082 dtrace_actdesc_release(act, vstate); 11083 } 11084 11085 return (NULL); 11086 } 11087 11088 static dtrace_ecbdesc_t * 11089 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11090 cred_t *cr) 11091 { 11092 dtrace_ecbdesc_t *ep; 11093 dof_ecbdesc_t *ecb; 11094 dtrace_probedesc_t *desc; 11095 dtrace_predicate_t *pred = NULL; 11096 11097 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 11098 dtrace_dof_error(dof, "truncated ECB description"); 11099 return (NULL); 11100 } 11101 11102 if (sec->dofs_align != sizeof (uint64_t)) { 11103 dtrace_dof_error(dof, "bad alignment in ECB description"); 11104 return (NULL); 11105 } 11106 11107 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 11108 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 11109 11110 if (sec == NULL) 11111 return (NULL); 11112 11113 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11114 ep->dted_uarg = ecb->dofe_uarg; 11115 desc = &ep->dted_probe; 11116 11117 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 11118 goto err; 11119 11120 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 11121 if ((sec = dtrace_dof_sect(dof, 11122 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 11123 goto err; 11124 11125 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 11126 goto err; 11127 11128 ep->dted_pred.dtpdd_predicate = pred; 11129 } 11130 11131 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 11132 if ((sec = dtrace_dof_sect(dof, 11133 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 11134 goto err; 11135 11136 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 11137 11138 if (ep->dted_action == NULL) 11139 goto err; 11140 } 11141 11142 return (ep); 11143 11144 err: 11145 if (pred != NULL) 11146 dtrace_predicate_release(pred, vstate); 11147 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11148 return (NULL); 11149 } 11150 11151 /* 11152 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 11153 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 11154 * site of any user SETX relocations to account for load object base address. 11155 * In the future, if we need other relocations, this function can be extended. 11156 */ 11157 static int 11158 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 11159 { 11160 uintptr_t daddr = (uintptr_t)dof; 11161 dof_relohdr_t *dofr = 11162 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11163 dof_sec_t *ss, *rs, *ts; 11164 dof_relodesc_t *r; 11165 uint_t i, n; 11166 11167 if (sec->dofs_size < sizeof (dof_relohdr_t) || 11168 sec->dofs_align != sizeof (dof_secidx_t)) { 11169 dtrace_dof_error(dof, "invalid relocation header"); 11170 return (-1); 11171 } 11172 11173 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 11174 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 11175 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 11176 11177 if (ss == NULL || rs == NULL || ts == NULL) 11178 return (-1); /* dtrace_dof_error() has been called already */ 11179 11180 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 11181 rs->dofs_align != sizeof (uint64_t)) { 11182 dtrace_dof_error(dof, "invalid relocation section"); 11183 return (-1); 11184 } 11185 11186 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 11187 n = rs->dofs_size / rs->dofs_entsize; 11188 11189 for (i = 0; i < n; i++) { 11190 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 11191 11192 switch (r->dofr_type) { 11193 case DOF_RELO_NONE: 11194 break; 11195 case DOF_RELO_SETX: 11196 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 11197 sizeof (uint64_t) > ts->dofs_size) { 11198 dtrace_dof_error(dof, "bad relocation offset"); 11199 return (-1); 11200 } 11201 11202 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 11203 dtrace_dof_error(dof, "misaligned setx relo"); 11204 return (-1); 11205 } 11206 11207 *(uint64_t *)taddr += ubase; 11208 break; 11209 default: 11210 dtrace_dof_error(dof, "invalid relocation type"); 11211 return (-1); 11212 } 11213 11214 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 11215 } 11216 11217 return (0); 11218 } 11219 11220 /* 11221 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 11222 * header: it should be at the front of a memory region that is at least 11223 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 11224 * size. It need not be validated in any other way. 11225 */ 11226 static int 11227 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 11228 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 11229 { 11230 uint64_t len = dof->dofh_loadsz, seclen; 11231 uintptr_t daddr = (uintptr_t)dof; 11232 dtrace_ecbdesc_t *ep; 11233 dtrace_enabling_t *enab; 11234 uint_t i; 11235 11236 ASSERT(MUTEX_HELD(&dtrace_lock)); 11237 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 11238 11239 /* 11240 * Check the DOF header identification bytes. In addition to checking 11241 * valid settings, we also verify that unused bits/bytes are zeroed so 11242 * we can use them later without fear of regressing existing binaries. 
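 *
 * (A well-formed header begins with the four magic bytes 0x7f, 'D', 'O',
 * 'F' -- DOF_MAG_STRING -- by analogy with ELF's identification bytes.)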
11243 */ 11244 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 11245 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 11246 dtrace_dof_error(dof, "DOF magic string mismatch"); 11247 return (-1); 11248 } 11249 11250 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 11251 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 11252 dtrace_dof_error(dof, "DOF has invalid data model"); 11253 return (-1); 11254 } 11255 11256 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 11257 dtrace_dof_error(dof, "DOF encoding mismatch"); 11258 return (-1); 11259 } 11260 11261 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 11262 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 11263 dtrace_dof_error(dof, "DOF version mismatch"); 11264 return (-1); 11265 } 11266 11267 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 11268 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 11269 return (-1); 11270 } 11271 11272 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 11273 dtrace_dof_error(dof, "DOF uses too many integer registers"); 11274 return (-1); 11275 } 11276 11277 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 11278 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 11279 return (-1); 11280 } 11281 11282 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 11283 if (dof->dofh_ident[i] != 0) { 11284 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 11285 return (-1); 11286 } 11287 } 11288 11289 if (dof->dofh_flags & ~DOF_FL_VALID) { 11290 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 11291 return (-1); 11292 } 11293 11294 if (dof->dofh_secsize == 0) { 11295 dtrace_dof_error(dof, "zero section header size"); 11296 return (-1); 11297 } 11298 11299 /* 11300 * Check that the section headers don't exceed the amount of DOF 11301 * data. Note that we cast the section size and number of sections 11302 * to uint64_t's to prevent possible overflow in the multiplication. 11303 */ 11304 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 11305 11306 if (dof->dofh_secoff > len || seclen > len || 11307 dof->dofh_secoff + seclen > len) { 11308 dtrace_dof_error(dof, "truncated section headers"); 11309 return (-1); 11310 } 11311 11312 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 11313 dtrace_dof_error(dof, "misaligned section headers"); 11314 return (-1); 11315 } 11316 11317 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 11318 dtrace_dof_error(dof, "misaligned section size"); 11319 return (-1); 11320 } 11321 11322 /* 11323 * Take an initial pass through the section headers to be sure that 11324 * the headers don't have stray offsets. If the 'noprobes' flag is 11325 * set, do not permit sections relating to providers, probes, or args. 
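 * (Provider, probe, argument and offset sections are meaningful only to
 * the helper path by which USDT providers are registered; there is no
 * legitimate reason for a consumer enabling to contain them, so their
 * presence here is treated as an error.)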
11326 */ 11327 for (i = 0; i < dof->dofh_secnum; i++) { 11328 dof_sec_t *sec = (dof_sec_t *)(daddr + 11329 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11330 11331 if (noprobes) { 11332 switch (sec->dofs_type) { 11333 case DOF_SECT_PROVIDER: 11334 case DOF_SECT_PROBES: 11335 case DOF_SECT_PRARGS: 11336 case DOF_SECT_PROFFS: 11337 dtrace_dof_error(dof, "illegal sections " 11338 "for enabling"); 11339 return (-1); 11340 } 11341 } 11342 11343 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11344 continue; /* just ignore non-loadable sections */ 11345 11346 if (sec->dofs_align & (sec->dofs_align - 1)) { 11347 dtrace_dof_error(dof, "bad section alignment"); 11348 return (-1); 11349 } 11350 11351 if (sec->dofs_offset & (sec->dofs_align - 1)) { 11352 dtrace_dof_error(dof, "misaligned section"); 11353 return (-1); 11354 } 11355 11356 if (sec->dofs_offset > len || sec->dofs_size > len || 11357 sec->dofs_offset + sec->dofs_size > len) { 11358 dtrace_dof_error(dof, "corrupt section header"); 11359 return (-1); 11360 } 11361 11362 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 11363 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 11364 dtrace_dof_error(dof, "non-terminating string table"); 11365 return (-1); 11366 } 11367 } 11368 11369 /* 11370 * Take a second pass through the sections and locate and perform any 11371 * relocations that are present. We do this after the first pass to 11372 * be sure that all sections have had their headers validated. 11373 */ 11374 for (i = 0; i < dof->dofh_secnum; i++) { 11375 dof_sec_t *sec = (dof_sec_t *)(daddr + 11376 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11377 11378 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11379 continue; /* skip sections that are not loadable */ 11380 11381 switch (sec->dofs_type) { 11382 case DOF_SECT_URELHDR: 11383 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 11384 return (-1); 11385 break; 11386 } 11387 } 11388 11389 if ((enab = *enabp) == NULL) 11390 enab = *enabp = dtrace_enabling_create(vstate); 11391 11392 for (i = 0; i < dof->dofh_secnum; i++) { 11393 dof_sec_t *sec = (dof_sec_t *)(daddr + 11394 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11395 11396 if (sec->dofs_type != DOF_SECT_ECBDESC) 11397 continue; 11398 11399 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 11400 dtrace_enabling_destroy(enab); 11401 *enabp = NULL; 11402 return (-1); 11403 } 11404 11405 dtrace_enabling_add(enab, ep); 11406 } 11407 11408 return (0); 11409 } 11410 11411 /* 11412 * Process DOF for any options. This routine assumes that the DOF has been 11413 * at least processed by dtrace_dof_slurp(). 
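 *
 * Each DOF_SECT_OPTDESC entry is a dof_optdesc_t. As a purely
 * hypothetical example, an entry setting the principal buffer size to
 * four megabytes would have:
 *
 *	dofo_option = DTRACEOPT_BUFSIZE
 *	dofo_strtab = DOF_SECIDX_NONE
 *	dofo_value = 4 * 1024 * 1024
 *
 * String-valued options must already have been resolved by the consumer;
 * any entry bearing a string table index other than DOF_SECIDX_NONE is
 * rejected below.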
11414 */ 11415 static int 11416 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 11417 { 11418 int i, rval; 11419 uint32_t entsize; 11420 size_t offs; 11421 dof_optdesc_t *desc; 11422 11423 for (i = 0; i < dof->dofh_secnum; i++) { 11424 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 11425 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11426 11427 if (sec->dofs_type != DOF_SECT_OPTDESC) 11428 continue; 11429 11430 if (sec->dofs_align != sizeof (uint64_t)) { 11431 dtrace_dof_error(dof, "bad alignment in " 11432 "option description"); 11433 return (EINVAL); 11434 } 11435 11436 if ((entsize = sec->dofs_entsize) == 0) { 11437 dtrace_dof_error(dof, "zeroed option entry size"); 11438 return (EINVAL); 11439 } 11440 11441 if (entsize < sizeof (dof_optdesc_t)) { 11442 dtrace_dof_error(dof, "bad option entry size"); 11443 return (EINVAL); 11444 } 11445 11446 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 11447 desc = (dof_optdesc_t *)((uintptr_t)dof + 11448 (uintptr_t)sec->dofs_offset + offs); 11449 11450 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 11451 dtrace_dof_error(dof, "non-zero option string"); 11452 return (EINVAL); 11453 } 11454 11455 if (desc->dofo_value == DTRACEOPT_UNSET) { 11456 dtrace_dof_error(dof, "unset option"); 11457 return (EINVAL); 11458 } 11459 11460 if ((rval = dtrace_state_option(state, 11461 desc->dofo_option, desc->dofo_value)) != 0) { 11462 dtrace_dof_error(dof, "rejected option"); 11463 return (rval); 11464 } 11465 } 11466 } 11467 11468 return (0); 11469 } 11470 11471 /* 11472 * DTrace Consumer State Functions 11473 */ 11474 int 11475 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 11476 { 11477 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 11478 void *base; 11479 uintptr_t limit; 11480 dtrace_dynvar_t *dvar, *next, *start; 11481 int i; 11482 11483 ASSERT(MUTEX_HELD(&dtrace_lock)); 11484 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 11485 11486 bzero(dstate, sizeof (dtrace_dstate_t)); 11487 11488 if ((dstate->dtds_chunksize = chunksize) == 0) 11489 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 11490 11491 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 11492 size = min; 11493 11494 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 11495 return (ENOMEM); 11496 11497 dstate->dtds_size = size; 11498 dstate->dtds_base = base; 11499 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 11500 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 11501 11502 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 11503 11504 if (hashsize != 1 && (hashsize & 1)) 11505 hashsize--; 11506 11507 dstate->dtds_hashsize = hashsize; 11508 dstate->dtds_hash = dstate->dtds_base; 11509 11510 /* 11511 * Set all of our hash buckets to point to the single sink, and (if 11512 * it hasn't already been set), set the sink's hash value to be the 11513 * sink sentinel value. The sink is needed for dynamic variable 11514 * lookups to know that they have iterated over an entire, valid hash 11515 * chain. 11516 */ 11517 for (i = 0; i < hashsize; i++) 11518 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 11519 11520 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 11521 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 11522 11523 /* 11524 * Determine number of active CPUs. Divide free list evenly among 11525 * active CPUs. 
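 * As a worked example (the chunk size here is illustrative): given one
 * megabyte of dynamic variable space and 256-byte chunks, the hash table
 * consumes size / (chunksize + sizeof (dtrace_dynhash_t)) buckets; the
 * space remaining after the hash table is divided by NCPU; each per-CPU
 * share is rounded down to a multiple of the chunk size (maxper); and
 * whatever is left over beyond NCPU * maxper lands on the last CPU.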
11526 */ 11527 start = (dtrace_dynvar_t *) 11528 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 11529 limit = (uintptr_t)base + size; 11530 11531 maxper = (limit - (uintptr_t)start) / NCPU; 11532 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 11533 11534 for (i = 0; i < NCPU; i++) { 11535 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 11536 11537 /* 11538 * If we don't even have enough chunks to make it once through 11539 * NCPUs, we're just going to allocate everything to the first 11540 * CPU. And if we're on the last CPU, we're going to allocate 11541 * whatever is left over. In either case, we set the limit to 11542 * be the limit of the dynamic variable space. 11543 */ 11544 if (maxper == 0 || i == NCPU - 1) { 11545 limit = (uintptr_t)base + size; 11546 start = NULL; 11547 } else { 11548 limit = (uintptr_t)start + maxper; 11549 start = (dtrace_dynvar_t *)limit; 11550 } 11551 11552 ASSERT(limit <= (uintptr_t)base + size); 11553 11554 for (;;) { 11555 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 11556 dstate->dtds_chunksize); 11557 11558 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 11559 break; 11560 11561 dvar->dtdv_next = next; 11562 dvar = next; 11563 } 11564 11565 if (maxper == 0) 11566 break; 11567 } 11568 11569 return (0); 11570 } 11571 11572 void 11573 dtrace_dstate_fini(dtrace_dstate_t *dstate) 11574 { 11575 ASSERT(MUTEX_HELD(&cpu_lock)); 11576 11577 if (dstate->dtds_base == NULL) 11578 return; 11579 11580 kmem_free(dstate->dtds_base, dstate->dtds_size); 11581 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 11582 } 11583 11584 static void 11585 dtrace_vstate_fini(dtrace_vstate_t *vstate) 11586 { 11587 /* 11588 * Logical XOR, where are you? 11589 */ 11590 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 11591 11592 if (vstate->dtvs_nglobals > 0) { 11593 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 11594 sizeof (dtrace_statvar_t *)); 11595 } 11596 11597 if (vstate->dtvs_ntlocals > 0) { 11598 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 11599 sizeof (dtrace_difv_t)); 11600 } 11601 11602 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 11603 11604 if (vstate->dtvs_nlocals > 0) { 11605 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 11606 sizeof (dtrace_statvar_t *)); 11607 } 11608 } 11609 11610 static void 11611 dtrace_state_clean(dtrace_state_t *state) 11612 { 11613 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 11614 return; 11615 11616 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 11617 dtrace_speculation_clean(state); 11618 } 11619 11620 static void 11621 dtrace_state_deadman(dtrace_state_t *state) 11622 { 11623 hrtime_t now; 11624 11625 dtrace_sync(); 11626 11627 now = dtrace_gethrtime(); 11628 11629 if (state != dtrace_anon.dta_state && 11630 now - state->dts_laststatus >= dtrace_deadman_user) 11631 return; 11632 11633 /* 11634 * We must be sure that dts_alive never appears to be less than the 11635 * value upon entry to dtrace_state_deadman(), and because we lack a 11636 * dtrace_cas64(), we cannot store to it atomically. We thus instead 11637 * store INT64_MAX to it, followed by a memory barrier, followed by 11638 * the new value. This assures that dts_alive never appears to be 11639 * less than its true value, regardless of the order in which the 11640 * stores to the underlying storage are issued. 
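 * (The consumer of dts_alive is the deadman check performed in probe
 * context; should it load the intermediate INT64_MAX, the computed age
 * of the state merely goes negative and the state appears alive -- the
 * safe failure mode.)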
11641 */ 11642 state->dts_alive = INT64_MAX; 11643 dtrace_membar_producer(); 11644 state->dts_alive = now; 11645 } 11646 11647 dtrace_state_t * 11648 dtrace_state_create(dev_t *devp, cred_t *cr) 11649 { 11650 minor_t minor; 11651 major_t major; 11652 char c[30]; 11653 dtrace_state_t *state; 11654 dtrace_optval_t *opt; 11655 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 11656 11657 ASSERT(MUTEX_HELD(&dtrace_lock)); 11658 ASSERT(MUTEX_HELD(&cpu_lock)); 11659 11660 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 11661 VM_BESTFIT | VM_SLEEP); 11662 11663 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 11664 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11665 return (NULL); 11666 } 11667 11668 state = ddi_get_soft_state(dtrace_softstate, minor); 11669 state->dts_epid = DTRACE_EPIDNONE + 1; 11670 11671 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 11672 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 11673 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 11674 11675 if (devp != NULL) { 11676 major = getemajor(*devp); 11677 } else { 11678 major = ddi_driver_major(dtrace_devi); 11679 } 11680 11681 state->dts_dev = makedevice(major, minor); 11682 11683 if (devp != NULL) 11684 *devp = state->dts_dev; 11685 11686 /* 11687 * We allocate NCPU buffers. On the one hand, this can be quite 11688 * a bit of memory per instance (nearly 36K on a Starcat). On the 11689 * other hand, it saves an additional memory reference in the probe 11690 * path. 11691 */ 11692 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 11693 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 11694 state->dts_cleaner = CYCLIC_NONE; 11695 state->dts_deadman = CYCLIC_NONE; 11696 state->dts_vstate.dtvs_state = state; 11697 11698 for (i = 0; i < DTRACEOPT_MAX; i++) 11699 state->dts_options[i] = DTRACEOPT_UNSET; 11700 11701 /* 11702 * Set the default options. 11703 */ 11704 opt = state->dts_options; 11705 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 11706 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 11707 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 11708 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 11709 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 11710 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 11711 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 11712 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 11713 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 11714 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 11715 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 11716 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 11717 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 11718 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 11719 11720 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 11721 11722 /* 11723 * Depending on the user credentials, we set flag bits which alter probe 11724 * visibility or the amount of destructiveness allowed. In the case of 11725 * actual anonymous tracing, or the possession of all privileges, all of 11726 * the normal checks are bypassed. 11727 */ 11728 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 11729 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 11730 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 11731 } else { 11732 /* 11733 * Set up the credentials for this instantiation. 
We take a 11734 * hold on the credential to prevent it from disappearing on 11735 * us; this in turn prevents the zone_t referenced by this 11736 * credential from disappearing. This means that we can 11737 * examine the credential and the zone from probe context. 11738 */ 11739 crhold(cr); 11740 state->dts_cred.dcr_cred = cr; 11741 11742 /* 11743 * CRA_PROC means "we have *some* privilege for dtrace" and 11744 * unlocks the use of variables like pid, zonename, etc. 11745 */ 11746 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 11747 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11748 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11749 } 11750 11751 /* 11752 * dtrace_user allows use of syscall and profile providers. 11753 * If the user also has proc_owner and/or proc_zone, we 11754 * extend the scope to include additional visibility and 11755 * destructive power. 11756 */ 11757 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 11758 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11759 state->dts_cred.dcr_visible |= 11760 DTRACE_CRV_ALLPROC; 11761 11762 state->dts_cred.dcr_action |= 11763 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11764 } 11765 11766 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 11767 state->dts_cred.dcr_visible |= 11768 DTRACE_CRV_ALLZONE; 11769 11770 state->dts_cred.dcr_action |= 11771 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11772 } 11773 11774 /* 11775 * If we have all privs in whatever zone this is, 11776 * we can do destructive things to processes which 11777 * have altered credentials. 11778 */ 11779 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11780 cr->cr_zone->zone_privset)) { 11781 state->dts_cred.dcr_action |= 11782 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11783 } 11784 } 11785 11786 /* 11787 * Holding the dtrace_kernel privilege also implies that 11788 * the user has the dtrace_user privilege from a visibility 11789 * perspective. But without further privileges, some 11790 * destructive actions are not available. 11791 */ 11792 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 11793 /* 11794 * Make all probes in all zones visible. However, 11795 * this doesn't mean that all actions become available 11796 * to all zones. 11797 */ 11798 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 11799 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 11800 11801 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 11802 DTRACE_CRA_PROC; 11803 /* 11804 * Holding proc_owner means that destructive actions 11805 * for *this* zone are allowed. 11806 */ 11807 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11808 state->dts_cred.dcr_action |= 11809 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11810 11811 /* 11812 * Holding proc_zone means that destructive actions 11813 * for this user/group ID in all zones are allowed. 11814 */ 11815 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11816 state->dts_cred.dcr_action |= 11817 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11818 11819 /* 11820 * If we have all privs in whatever zone this is, 11821 * we can do destructive things to processes which 11822 * have altered credentials. 11823 */ 11824 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11825 cr->cr_zone->zone_privset)) { 11826 state->dts_cred.dcr_action |= 11827 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11828 } 11829 } 11830 11831 /* 11832 * Holding the dtrace_proc privilege gives control over fasttrap 11833 * and pid providers. We need to grant wider destructive 11834 * privileges in the event that the user has proc_owner and/or 11835 * proc_zone.
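 * (This mirrors the dtrace_user case above: proc_owner widens
 * destructive actions to other users' processes within the zone, and
 * proc_zone widens them to other zones.)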
11836 */ 11837 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11838 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11839 state->dts_cred.dcr_action |= 11840 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11841 11842 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11843 state->dts_cred.dcr_action |= 11844 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11845 } 11846 } 11847 11848 return (state); 11849 } 11850 11851 static int 11852 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 11853 { 11854 dtrace_optval_t *opt = state->dts_options, size; 11855 processorid_t cpu; 11856 int flags = 0, rval; 11857 11858 ASSERT(MUTEX_HELD(&dtrace_lock)); 11859 ASSERT(MUTEX_HELD(&cpu_lock)); 11860 ASSERT(which < DTRACEOPT_MAX); 11861 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 11862 (state == dtrace_anon.dta_state && 11863 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 11864 11865 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 11866 return (0); 11867 11868 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 11869 cpu = opt[DTRACEOPT_CPU]; 11870 11871 if (which == DTRACEOPT_SPECSIZE) 11872 flags |= DTRACEBUF_NOSWITCH; 11873 11874 if (which == DTRACEOPT_BUFSIZE) { 11875 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 11876 flags |= DTRACEBUF_RING; 11877 11878 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 11879 flags |= DTRACEBUF_FILL; 11880 11881 if (state != dtrace_anon.dta_state || 11882 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 11883 flags |= DTRACEBUF_INACTIVE; 11884 } 11885 11886 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 11887 /* 11888 * The size must be 8-byte aligned. If the size is not 8-byte 11889 * aligned, drop it down by the difference. 11890 */ 11891 if (size & (sizeof (uint64_t) - 1)) 11892 size -= size & (sizeof (uint64_t) - 1); 11893 11894 if (size < state->dts_reserve) { 11895 /* 11896 * Buffers must always be large enough to accommodate 11897 * their prereserved space. We return E2BIG instead 11898 * of ENOMEM in this case to allow user-level 11899 * software to differentiate the cases. 11900 */ 11901 return (E2BIG); 11902 } 11903 11904 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 11905 11906 if (rval != ENOMEM) { 11907 opt[which] = size; 11908 return (rval); 11909 } 11910 11911 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11912 return (rval); 11913 } 11914 11915 return (ENOMEM); 11916 } 11917 11918 static int 11919 dtrace_state_buffers(dtrace_state_t *state) 11920 { 11921 dtrace_speculation_t *spec = state->dts_speculations; 11922 int rval, i; 11923 11924 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 11925 DTRACEOPT_BUFSIZE)) != 0) 11926 return (rval); 11927 11928 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 11929 DTRACEOPT_AGGSIZE)) != 0) 11930 return (rval); 11931 11932 for (i = 0; i < state->dts_nspeculations; i++) { 11933 if ((rval = dtrace_state_buffer(state, 11934 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 11935 return (rval); 11936 } 11937 11938 return (0); 11939 } 11940 11941 static void 11942 dtrace_state_prereserve(dtrace_state_t *state) 11943 { 11944 dtrace_ecb_t *ecb; 11945 dtrace_probe_t *probe; 11946 11947 state->dts_reserve = 0; 11948 11949 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 11950 return; 11951 11952 /* 11953 * If our buffer policy is a "fill" buffer policy, we need to set the 11954 * prereserved space to be the space required by the END probes.
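 * Each matching END ECB contributes dte_needed + dte_alignment bytes to
 * dts_reserve. (To pick illustrative numbers: a single END ECB needing
 * 32 bytes of record space at 8-byte alignment yields a reserve of 40.)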
11955 */ 11956 probe = dtrace_probes[dtrace_probeid_end - 1]; 11957 ASSERT(probe != NULL); 11958 11959 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 11960 if (ecb->dte_state != state) 11961 continue; 11962 11963 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 11964 } 11965 } 11966 11967 static int 11968 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 11969 { 11970 dtrace_optval_t *opt = state->dts_options, sz, nspec; 11971 dtrace_speculation_t *spec; 11972 dtrace_buffer_t *buf; 11973 cyc_handler_t hdlr; 11974 cyc_time_t when; 11975 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11976 dtrace_icookie_t cookie; 11977 11978 mutex_enter(&cpu_lock); 11979 mutex_enter(&dtrace_lock); 11980 11981 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 11982 rval = EBUSY; 11983 goto out; 11984 } 11985 11986 /* 11987 * Before we can perform any checks, we must prime all of the 11988 * retained enablings that correspond to this state. 11989 */ 11990 dtrace_enabling_prime(state); 11991 11992 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 11993 rval = EACCES; 11994 goto out; 11995 } 11996 11997 dtrace_state_prereserve(state); 11998 11999 /* 12000 * What we want to do now is try to allocate our speculations. 12001 * We do not automatically resize the number of speculations; if 12002 * this fails, we will fail the operation. 12003 */ 12004 nspec = opt[DTRACEOPT_NSPEC]; 12005 ASSERT(nspec != DTRACEOPT_UNSET); 12006 12007 if (nspec > INT_MAX) { 12008 rval = ENOMEM; 12009 goto out; 12010 } 12011 12012 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 12013 12014 if (spec == NULL) { 12015 rval = ENOMEM; 12016 goto out; 12017 } 12018 12019 state->dts_speculations = spec; 12020 state->dts_nspeculations = (int)nspec; 12021 12022 for (i = 0; i < nspec; i++) { 12023 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 12024 rval = ENOMEM; 12025 goto err; 12026 } 12027 12028 spec[i].dtsp_buffer = buf; 12029 } 12030 12031 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 12032 if (dtrace_anon.dta_state == NULL) { 12033 rval = ENOENT; 12034 goto out; 12035 } 12036 12037 if (state->dts_necbs != 0) { 12038 rval = EALREADY; 12039 goto out; 12040 } 12041 12042 state->dts_anon = dtrace_anon_grab(); 12043 ASSERT(state->dts_anon != NULL); 12044 state = state->dts_anon; 12045 12046 /* 12047 * We want "grabanon" to be set in the grabbed state, so we'll 12048 * copy that option value from the grabbing state into the 12049 * grabbed state. 12050 */ 12051 state->dts_options[DTRACEOPT_GRABANON] = 12052 opt[DTRACEOPT_GRABANON]; 12053 12054 *cpu = dtrace_anon.dta_beganon; 12055 12056 /* 12057 * If the anonymous state is active (as it almost certainly 12058 * is if the anonymous enabling ultimately matched anything), 12059 * we don't allow any further option processing -- but we 12060 * don't return failure. 12061 */ 12062 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12063 goto out; 12064 } 12065 12066 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 12067 opt[DTRACEOPT_AGGSIZE] != 0) { 12068 if (state->dts_aggregations == NULL) { 12069 /* 12070 * We're not going to create an aggregation buffer 12071 * because we don't have any ECBs that contain 12072 * aggregations -- set this option to 0. 12073 */ 12074 opt[DTRACEOPT_AGGSIZE] = 0; 12075 } else { 12076 /* 12077 * If we have an aggregation buffer, we must also have 12078 * a buffer to use as scratch.
12079 */ 12080 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 12081 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 12082 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 12083 } 12084 } 12085 } 12086 12087 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 12088 opt[DTRACEOPT_SPECSIZE] != 0) { 12089 if (!state->dts_speculates) { 12090 /* 12091 * We're not going to create speculation buffers 12092 * because we don't have any ECBs that actually 12093 * speculate -- set the speculation size to 0. 12094 */ 12095 opt[DTRACEOPT_SPECSIZE] = 0; 12096 } 12097 } 12098 12099 /* 12100 * The bare minimum size for any buffer that we're actually going to 12101 * do anything to is sizeof (uint64_t). 12102 */ 12103 sz = sizeof (uint64_t); 12104 12105 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 12106 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 12107 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 12108 /* 12109 * A buffer size has been explicitly set to 0 (or to a size 12110 * that will be adjusted to 0) and we need the space -- we 12111 * need to return failure. We return ENOSPC to differentiate 12112 * it from failing to allocate a buffer due to failure to meet 12113 * the reserve (for which we return E2BIG). 12114 */ 12115 rval = ENOSPC; 12116 goto out; 12117 } 12118 12119 if ((rval = dtrace_state_buffers(state)) != 0) 12120 goto err; 12121 12122 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 12123 sz = dtrace_dstate_defsize; 12124 12125 do { 12126 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 12127 12128 if (rval == 0) 12129 break; 12130 12131 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12132 goto err; 12133 } while (sz >>= 1); 12134 12135 opt[DTRACEOPT_DYNVARSIZE] = sz; 12136 12137 if (rval != 0) 12138 goto err; 12139 12140 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 12141 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 12142 12143 if (opt[DTRACEOPT_CLEANRATE] == 0) 12144 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12145 12146 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 12147 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 12148 12149 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 12150 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12151 12152 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 12153 hdlr.cyh_arg = state; 12154 hdlr.cyh_level = CY_LOW_LEVEL; 12155 12156 when.cyt_when = 0; 12157 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 12158 12159 state->dts_cleaner = cyclic_add(&hdlr, &when); 12160 12161 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 12162 hdlr.cyh_arg = state; 12163 hdlr.cyh_level = CY_LOW_LEVEL; 12164 12165 when.cyt_when = 0; 12166 when.cyt_interval = dtrace_deadman_interval; 12167 12168 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 12169 state->dts_deadman = cyclic_add(&hdlr, &when); 12170 12171 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 12172 12173 /* 12174 * Now it's time to actually fire the BEGIN probe. We need to disable 12175 * interrupts here both to record the CPU on which we fired the BEGIN 12176 * probe (the data from this CPU will be processed first at user 12177 * level) and to manually activate the buffer for this CPU. 
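 * (Only this CPU's buffer is activated here, assuring that the BEGIN
 * records land in a known buffer; every other CPU's principal buffer is
 * activated en masse via the dtrace_xcall() below, once BEGIN has fired.)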
12178 */ 12179 cookie = dtrace_interrupt_disable(); 12180 *cpu = CPU->cpu_id; 12181 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 12182 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12183 12184 dtrace_probe(dtrace_probeid_begin, 12185 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12186 dtrace_interrupt_enable(cookie); 12187 /* 12188 * We may have had an exit action from a BEGIN probe; only change our 12189 * state to ACTIVE if we're still in WARMUP. 12190 */ 12191 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 12192 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 12193 12194 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 12195 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 12196 12197 /* 12198 * Regardless of whether we're now in ACTIVE or DRAINING, we 12199 * want each CPU to transition its principal buffer out of the 12200 * INACTIVE state. Doing this assures that no CPU will suddenly begin 12201 * processing an ECB halfway down a probe's ECB chain; all CPUs will 12202 * atomically transition from processing none of a state's ECBs to 12203 * processing all of them. 12204 */ 12205 dtrace_xcall(DTRACE_CPUALL, 12206 (dtrace_xcall_t)dtrace_buffer_activate, state); 12207 goto out; 12208 12209 err: 12210 dtrace_buffer_free(state->dts_buffer); 12211 dtrace_buffer_free(state->dts_aggbuffer); 12212 12213 if ((nspec = state->dts_nspeculations) == 0) { 12214 ASSERT(state->dts_speculations == NULL); 12215 goto out; 12216 } 12217 12218 spec = state->dts_speculations; 12219 ASSERT(spec != NULL); 12220 12221 for (i = 0; i < state->dts_nspeculations; i++) { 12222 if ((buf = spec[i].dtsp_buffer) == NULL) 12223 break; 12224 12225 dtrace_buffer_free(buf); 12226 kmem_free(buf, bufsize); 12227 } 12228 12229 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12230 state->dts_nspeculations = 0; 12231 state->dts_speculations = NULL; 12232 12233 out: 12234 mutex_exit(&dtrace_lock); 12235 mutex_exit(&cpu_lock); 12236 12237 return (rval); 12238 } 12239 12240 static int 12241 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 12242 { 12243 dtrace_icookie_t cookie; 12244 12245 ASSERT(MUTEX_HELD(&dtrace_lock)); 12246 12247 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 12248 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 12249 return (EINVAL); 12250 12251 /* 12252 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 12253 * to be sure that every CPU has seen it. See below for the details 12254 * on why this is done. 12255 */ 12256 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 12257 dtrace_sync(); 12258 12259 /* 12260 * By this point, it is impossible for any CPU to be still processing 12261 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 12262 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 12263 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 12264 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 12265 * iff we're in the END probe. 12266 */ 12267 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 12268 dtrace_sync(); 12269 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 12270 12271 /* 12272 * Finally, we can release the reserve and call the END probe. We 12273 * disable interrupts across calling the END probe to allow us to 12274 * return the CPU on which we actually called the END probe. This 12275 * allows user-land to be sure that this CPU's principal buffer is 12276 * processed last.
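* (The reserve released here is the space that dtrace_state_prereserve() computed above for the END probe's ECBs; clearing dts_reserve makes that space available for the END records themselves.)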
12277 */ 12278 state->dts_reserve = 0; 12279 12280 cookie = dtrace_interrupt_disable(); 12281 *cpu = CPU->cpu_id; 12282 dtrace_probe(dtrace_probeid_end, 12283 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12284 dtrace_interrupt_enable(cookie); 12285 12286 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 12287 dtrace_sync(); 12288 12289 return (0); 12290 } 12291 12292 static int 12293 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 12294 dtrace_optval_t val) 12295 { 12296 ASSERT(MUTEX_HELD(&dtrace_lock)); 12297 12298 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12299 return (EBUSY); 12300 12301 if (option >= DTRACEOPT_MAX) 12302 return (EINVAL); 12303 12304 if (option != DTRACEOPT_CPU && val < 0) 12305 return (EINVAL); 12306 12307 switch (option) { 12308 case DTRACEOPT_DESTRUCTIVE: 12309 if (dtrace_destructive_disallow) 12310 return (EACCES); 12311 12312 state->dts_cred.dcr_destructive = 1; 12313 break; 12314 12315 case DTRACEOPT_BUFSIZE: 12316 case DTRACEOPT_DYNVARSIZE: 12317 case DTRACEOPT_AGGSIZE: 12318 case DTRACEOPT_SPECSIZE: 12319 case DTRACEOPT_STRSIZE: 12320 if (val < 0) 12321 return (EINVAL); 12322 12323 if (val >= LONG_MAX) { 12324 /* 12325 * If this is an otherwise negative value, set it to 12326 * the highest multiple of 128m less than LONG_MAX. 12327 * Technically, we're adjusting the size without 12328 * regard to the buffer resizing policy, but in fact, 12329 * this has no effect -- if we set the buffer size to 12330 * ~LONG_MAX and the buffer policy is ultimately set to 12331 * be "manual", the buffer allocation is guaranteed to 12332 * fail, if only because the allocation requires two 12333 * buffers. (We set the size to the highest 12334 * multiple of 128m because it ensures that the size 12335 * will remain a multiple of a megabyte when 12336 * repeatedly halved -- all the way down to 15m.) 12337 */ 12338 val = LONG_MAX - (1 << 27) + 1; 12339 } 12340 } 12341 12342 state->dts_options[option] = val; 12343 12344 return (0); 12345 } 12346 12347 static void 12348 dtrace_state_destroy(dtrace_state_t *state) 12349 { 12350 dtrace_ecb_t *ecb; 12351 dtrace_vstate_t *vstate = &state->dts_vstate; 12352 minor_t minor = getminor(state->dts_dev); 12353 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12354 dtrace_speculation_t *spec = state->dts_speculations; 12355 int nspec = state->dts_nspeculations; 12356 uint32_t match; 12357 12358 ASSERT(MUTEX_HELD(&dtrace_lock)); 12359 ASSERT(MUTEX_HELD(&cpu_lock)); 12360 12361 /* 12362 * First, retract any retained enablings for this state. 12363 */ 12364 dtrace_enabling_retract(state); 12365 ASSERT(state->dts_nretained == 0); 12366 12367 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 12368 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 12369 /* 12370 * We have managed to come into dtrace_state_destroy() on a 12371 * hot enabling -- almost certainly because of a disorderly 12372 * shutdown of a consumer. (That is, a consumer that is 12373 * exiting without having called dtrace_stop().) In this case, 12374 * we're going to set our activity to be KILLED, and then 12375 * issue a sync to be sure that everyone is out of probe 12376 * context before we start blowing away ECBs. 12377 */ 12378 state->dts_activity = DTRACE_ACTIVITY_KILLED; 12379 dtrace_sync(); 12380 } 12381 12382 /* 12383 * Release the credential hold we took in dtrace_state_create().
12384 */ 12385 if (state->dts_cred.dcr_cred != NULL) 12386 crfree(state->dts_cred.dcr_cred); 12387 12388 /* 12389 * Now we can safely disable and destroy any enabled probes. Because 12390 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 12391 * (especially if they're all enabled), we take two passes through the 12392 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 12393 * in the second we disable whatever is left over. 12394 */ 12395 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 12396 for (i = 0; i < state->dts_necbs; i++) { 12397 if ((ecb = state->dts_ecbs[i]) == NULL) 12398 continue; 12399 12400 if (match && ecb->dte_probe != NULL) { 12401 dtrace_probe_t *probe = ecb->dte_probe; 12402 dtrace_provider_t *prov = probe->dtpr_provider; 12403 12404 if (!(prov->dtpv_priv.dtpp_flags & match)) 12405 continue; 12406 } 12407 12408 dtrace_ecb_disable(ecb); 12409 dtrace_ecb_destroy(ecb); 12410 } 12411 12412 if (!match) 12413 break; 12414 } 12415 12416 /* 12417 * Before we free the buffers, perform one more sync to assure that 12418 * every CPU is out of probe context. 12419 */ 12420 dtrace_sync(); 12421 12422 dtrace_buffer_free(state->dts_buffer); 12423 dtrace_buffer_free(state->dts_aggbuffer); 12424 12425 for (i = 0; i < nspec; i++) 12426 dtrace_buffer_free(spec[i].dtsp_buffer); 12427 12428 if (state->dts_cleaner != CYCLIC_NONE) 12429 cyclic_remove(state->dts_cleaner); 12430 12431 if (state->dts_deadman != CYCLIC_NONE) 12432 cyclic_remove(state->dts_deadman); 12433 12434 dtrace_dstate_fini(&vstate->dtvs_dynvars); 12435 dtrace_vstate_fini(vstate); 12436 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 12437 12438 if (state->dts_aggregations != NULL) { 12439 #ifdef DEBUG 12440 for (i = 0; i < state->dts_naggregations; i++) 12441 ASSERT(state->dts_aggregations[i] == NULL); 12442 #endif 12443 ASSERT(state->dts_naggregations > 0); 12444 kmem_free(state->dts_aggregations, 12445 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 12446 } 12447 12448 kmem_free(state->dts_buffer, bufsize); 12449 kmem_free(state->dts_aggbuffer, bufsize); 12450 12451 for (i = 0; i < nspec; i++) 12452 kmem_free(spec[i].dtsp_buffer, bufsize); 12453 12454 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12455 12456 dtrace_format_destroy(state); 12457 12458 vmem_destroy(state->dts_aggid_arena); 12459 ddi_soft_state_free(dtrace_softstate, minor); 12460 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12461 } 12462 12463 /* 12464 * DTrace Anonymous Enabling Functions 12465 */ 12466 static dtrace_state_t * 12467 dtrace_anon_grab(void) 12468 { 12469 dtrace_state_t *state; 12470 12471 ASSERT(MUTEX_HELD(&dtrace_lock)); 12472 12473 if ((state = dtrace_anon.dta_state) == NULL) { 12474 ASSERT(dtrace_anon.dta_enabling == NULL); 12475 return (NULL); 12476 } 12477 12478 ASSERT(dtrace_anon.dta_enabling != NULL); 12479 ASSERT(dtrace_retained != NULL); 12480 12481 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 12482 dtrace_anon.dta_enabling = NULL; 12483 dtrace_anon.dta_state = NULL; 12484 12485 return (state); 12486 } 12487 12488 static void 12489 dtrace_anon_property(void) 12490 { 12491 int i, rv; 12492 dtrace_state_t *state; 12493 dof_hdr_t *dof; 12494 char c[32]; /* enough for "dof-data-" + digits */ 12495 12496 ASSERT(MUTEX_HELD(&dtrace_lock)); 12497 ASSERT(MUTEX_HELD(&cpu_lock)); 12498 12499 for (i = 0; ; i++) { 12500 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 12501 12502 dtrace_err_verbose = 1; 12503 12504 if ((dof = dtrace_dof_property(c)) 
== NULL) { 12505 dtrace_err_verbose = 0; 12506 break; 12507 } 12508 12509 /* 12510 * We want to create anonymous state, so we need to transition 12511 * the kernel debugger to indicate that DTrace is active. If 12512 * this fails (e.g. because the debugger has modified text in 12513 * some way), we won't continue with the processing. 12514 */ 12515 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 12516 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 12517 "enabling ignored."); 12518 dtrace_dof_destroy(dof); 12519 break; 12520 } 12521 12522 /* 12523 * If we haven't allocated an anonymous state, we'll do so now. 12524 */ 12525 if ((state = dtrace_anon.dta_state) == NULL) { 12526 state = dtrace_state_create(NULL, NULL); 12527 dtrace_anon.dta_state = state; 12528 12529 if (state == NULL) { 12530 /* 12531 * This basically shouldn't happen: the only 12532 * failure mode from dtrace_state_create() is a 12533 * failure of ddi_soft_state_zalloc() that 12534 * itself should never happen. Still, the 12535 * interface allows for a failure mode, and 12536 * we want to fail as gracefully as possible: 12537 * we'll emit an error message and cease 12538 * processing anonymous state in this case. 12539 */ 12540 cmn_err(CE_WARN, "failed to create " 12541 "anonymous state"); 12542 dtrace_dof_destroy(dof); 12543 break; 12544 } 12545 } 12546 12547 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 12548 &dtrace_anon.dta_enabling, 0, B_TRUE); 12549 12550 if (rv == 0) 12551 rv = dtrace_dof_options(dof, state); 12552 12553 dtrace_err_verbose = 0; 12554 dtrace_dof_destroy(dof); 12555 12556 if (rv != 0) { 12557 /* 12558 * This is malformed DOF; chuck any anonymous state 12559 * that we created. 12560 */ 12561 ASSERT(dtrace_anon.dta_enabling == NULL); 12562 dtrace_state_destroy(state); 12563 dtrace_anon.dta_state = NULL; 12564 break; 12565 } 12566 12567 ASSERT(dtrace_anon.dta_enabling != NULL); 12568 } 12569 12570 if (dtrace_anon.dta_enabling != NULL) { 12571 int rval; 12572 12573 /* 12574 * dtrace_enabling_retain() can only fail because we are 12575 * trying to retain more enablings than are allowed -- but 12576 * we only have one anonymous enabling, and we are guaranteed 12577 * to be allowed at least one retained enabling; we assert 12578 * that dtrace_enabling_retain() returns success. 12579 */ 12580 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 12581 ASSERT(rval == 0); 12582 12583 dtrace_enabling_dump(dtrace_anon.dta_enabling); 12584 } 12585 } 12586 12587 /* 12588 * DTrace Helper Functions 12589 */ 12590 static void 12591 dtrace_helper_trace(dtrace_helper_action_t *helper, 12592 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 12593 { 12594 uint32_t size, next, nnext, i; 12595 dtrace_helptrace_t *ent; 12596 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12597 12598 if (!dtrace_helptrace_enabled) 12599 return; 12600 12601 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 12602 12603 /* 12604 * What would a tracing framework be without its own tracing 12605 * framework? (Well, a hell of a lot simpler, for starters...) 12606 */ 12607 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 12608 sizeof (uint64_t) - sizeof (uint64_t); 12609 12610 /* 12611 * Iterate until we can allocate a slot in the trace buffer. 
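* A sketch of the scheme below: read dtrace_helptrace_next, compute where this entry would end, and wrap to the buffer's start if it wouldn't fit (nnext == size marks the wrap and is handled after the loop); the compare-and-swap claims the slot only if no other CPU raced us to it, in which case we simply retry.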
12612 */ 12613 do { 12614 next = dtrace_helptrace_next; 12615 12616 if (next + size < dtrace_helptrace_bufsize) { 12617 nnext = next + size; 12618 } else { 12619 nnext = size; 12620 } 12621 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 12622 12623 /* 12624 * We have our slot; fill it in. 12625 */ 12626 if (nnext == size) 12627 next = 0; 12628 12629 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 12630 ent->dtht_helper = helper; 12631 ent->dtht_where = where; 12632 ent->dtht_nlocals = vstate->dtvs_nlocals; 12633 12634 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 12635 mstate->dtms_fltoffs : -1; 12636 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 12637 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 12638 12639 for (i = 0; i < vstate->dtvs_nlocals; i++) { 12640 dtrace_statvar_t *svar; 12641 12642 if ((svar = vstate->dtvs_locals[i]) == NULL) 12643 continue; 12644 12645 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 12646 ent->dtht_locals[i] = 12647 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 12648 } 12649 } 12650 12651 static uint64_t 12652 dtrace_helper(int which, dtrace_mstate_t *mstate, 12653 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 12654 { 12655 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12656 uint64_t sarg0 = mstate->dtms_arg[0]; 12657 uint64_t sarg1 = mstate->dtms_arg[1]; 12658 uint64_t rval; 12659 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 12660 dtrace_helper_action_t *helper; 12661 dtrace_vstate_t *vstate; 12662 dtrace_difo_t *pred; 12663 int i, trace = dtrace_helptrace_enabled; 12664 12665 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 12666 12667 if (helpers == NULL) 12668 return (0); 12669 12670 if ((helper = helpers->dthps_actions[which]) == NULL) 12671 return (0); 12672 12673 vstate = &helpers->dthps_vstate; 12674 mstate->dtms_arg[0] = arg0; 12675 mstate->dtms_arg[1] = arg1; 12676 12677 /* 12678 * Now iterate over each helper. If its predicate evaluates to 'true', 12679 * we'll call the corresponding actions. Note that the below calls 12680 * to dtrace_dif_emulate() may set faults in machine state. This is 12681 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 12682 * the stored DIF offset with its own (which is the desired behavior). 12683 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 12684 * from machine state; this is okay, too. 12685 */ 12686 for (; helper != NULL; helper = helper->dtha_next) { 12687 if ((pred = helper->dtha_predicate) != NULL) { 12688 if (trace) 12689 dtrace_helper_trace(helper, mstate, vstate, 0); 12690 12691 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 12692 goto next; 12693 12694 if (*flags & CPU_DTRACE_FAULT) 12695 goto err; 12696 } 12697 12698 for (i = 0; i < helper->dtha_nactions; i++) { 12699 if (trace) 12700 dtrace_helper_trace(helper, 12701 mstate, vstate, i + 1); 12702 12703 rval = dtrace_dif_emulate(helper->dtha_actions[i], 12704 mstate, vstate, state); 12705 12706 if (*flags & CPU_DTRACE_FAULT) 12707 goto err; 12708 } 12709 12710 next: 12711 if (trace) 12712 dtrace_helper_trace(helper, mstate, vstate, 12713 DTRACE_HELPTRACE_NEXT); 12714 } 12715 12716 if (trace) 12717 dtrace_helper_trace(helper, mstate, vstate, 12718 DTRACE_HELPTRACE_DONE); 12719 12720 /* 12721 * Restore the arg0 that we saved upon entry. 
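* (arg1 is restored as well; both were saved in sarg0 and sarg1 on entry.)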
12722 */ 12723 mstate->dtms_arg[0] = sarg0; 12724 mstate->dtms_arg[1] = sarg1; 12725 12726 return (rval); 12727 12728 err: 12729 if (trace) 12730 dtrace_helper_trace(helper, mstate, vstate, 12731 DTRACE_HELPTRACE_ERR); 12732 12733 /* 12734 * Restore the arg0 and arg1 that we saved upon entry. 12735 */ 12736 mstate->dtms_arg[0] = sarg0; 12737 mstate->dtms_arg[1] = sarg1; 12738 12739 return (0); 12740 } 12741 12742 static void 12743 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 12744 dtrace_vstate_t *vstate) 12745 { 12746 int i; 12747 12748 if (helper->dtha_predicate != NULL) 12749 dtrace_difo_release(helper->dtha_predicate, vstate); 12750 12751 for (i = 0; i < helper->dtha_nactions; i++) { 12752 ASSERT(helper->dtha_actions[i] != NULL); 12753 dtrace_difo_release(helper->dtha_actions[i], vstate); 12754 } 12755 12756 kmem_free(helper->dtha_actions, 12757 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 12758 kmem_free(helper, sizeof (dtrace_helper_action_t)); 12759 } 12760 12761 static int 12762 dtrace_helper_destroygen(int gen) 12763 { 12764 proc_t *p = curproc; 12765 dtrace_helpers_t *help = p->p_dtrace_helpers; 12766 dtrace_vstate_t *vstate; 12767 int i; 12768 12769 ASSERT(MUTEX_HELD(&dtrace_lock)); 12770 12771 if (help == NULL || gen > help->dthps_generation) 12772 return (EINVAL); 12773 12774 vstate = &help->dthps_vstate; 12775 12776 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12777 dtrace_helper_action_t *last = NULL, *h, *next; 12778 12779 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12780 next = h->dtha_next; 12781 12782 if (h->dtha_generation == gen) { 12783 if (last != NULL) { 12784 last->dtha_next = next; 12785 } else { 12786 help->dthps_actions[i] = next; 12787 } 12788 12789 dtrace_helper_action_destroy(h, vstate); 12790 } else { 12791 last = h; 12792 } 12793 } 12794 } 12795 12796 /* 12797 * Iterate until we've cleared out all helper providers with the 12798 * given generation number. 12799 */ 12800 for (;;) { 12801 dtrace_helper_provider_t *prov; 12802 12803 /* 12804 * Look for a helper provider with the right generation. We 12805 * have to start back at the beginning of the list each time 12806 * because we drop dtrace_lock. It's unlikely that we'll make 12807 * more than two passes. 12808 */ 12809 for (i = 0; i < help->dthps_nprovs; i++) { 12810 prov = help->dthps_provs[i]; 12811 12812 if (prov->dthp_generation == gen) 12813 break; 12814 } 12815 12816 /* 12817 * If there were no matches, we're done. 12818 */ 12819 if (i == help->dthps_nprovs) 12820 break; 12821 12822 /* 12823 * Move the last helper provider into this slot. 12824 */ 12825 help->dthps_nprovs--; 12826 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 12827 help->dthps_provs[help->dthps_nprovs] = NULL; 12828 12829 mutex_exit(&dtrace_lock); 12830 12831 /* 12832 * If we have a meta provider, remove this helper provider.
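* (Note that dtrace_lock was dropped above: dtrace_meta_lock must be acquired before dtrace_lock, as in dtrace_helper_provider_register(). This is also why the scan over dthps_provs restarts from the top on each pass.)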
12833 */ 12834 mutex_enter(&dtrace_meta_lock); 12835 if (dtrace_meta_pid != NULL) { 12836 ASSERT(dtrace_deferred_pid == NULL); 12837 dtrace_helper_provider_remove(&prov->dthp_prov, 12838 p->p_pid); 12839 } 12840 mutex_exit(&dtrace_meta_lock); 12841 12842 dtrace_helper_provider_destroy(prov); 12843 12844 mutex_enter(&dtrace_lock); 12845 } 12846 12847 return (0); 12848 } 12849 12850 static int 12851 dtrace_helper_validate(dtrace_helper_action_t *helper) 12852 { 12853 int err = 0, i; 12854 dtrace_difo_t *dp; 12855 12856 if ((dp = helper->dtha_predicate) != NULL) 12857 err += dtrace_difo_validate_helper(dp); 12858 12859 for (i = 0; i < helper->dtha_nactions; i++) 12860 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 12861 12862 return (err == 0); 12863 } 12864 12865 static int 12866 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 12867 { 12868 dtrace_helpers_t *help; 12869 dtrace_helper_action_t *helper, *last; 12870 dtrace_actdesc_t *act; 12871 dtrace_vstate_t *vstate; 12872 dtrace_predicate_t *pred; 12873 int count = 0, nactions = 0, i; 12874 12875 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 12876 return (EINVAL); 12877 12878 help = curproc->p_dtrace_helpers; 12879 last = help->dthps_actions[which]; 12880 vstate = &help->dthps_vstate; 12881 12882 for (count = 0; last != NULL; last = last->dtha_next) { 12883 count++; 12884 if (last->dtha_next == NULL) 12885 break; 12886 } 12887 12888 /* 12889 * If we already have dtrace_helper_actions_max helper actions for this 12890 * helper action type, we'll refuse to add a new one. 12891 */ 12892 if (count >= dtrace_helper_actions_max) 12893 return (ENOSPC); 12894 12895 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 12896 helper->dtha_generation = help->dthps_generation; 12897 12898 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 12899 ASSERT(pred->dtp_difo != NULL); 12900 dtrace_difo_hold(pred->dtp_difo); 12901 helper->dtha_predicate = pred->dtp_difo; 12902 } 12903 12904 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 12905 if (act->dtad_kind != DTRACEACT_DIFEXPR) 12906 goto err; 12907 12908 if (act->dtad_difo == NULL) 12909 goto err; 12910 12911 nactions++; 12912 } 12913 12914 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 12915 (helper->dtha_nactions = nactions), KM_SLEEP); 12916 12917 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 12918 dtrace_difo_hold(act->dtad_difo); 12919 helper->dtha_actions[i++] = act->dtad_difo; 12920 } 12921 12922 if (!dtrace_helper_validate(helper)) 12923 goto err; 12924 12925 if (last == NULL) { 12926 help->dthps_actions[which] = helper; 12927 } else { 12928 last->dtha_next = helper; 12929 } 12930 12931 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 12932 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 12933 dtrace_helptrace_next = 0; 12934 } 12935 12936 return (0); 12937 err: 12938 dtrace_helper_action_destroy(helper, vstate); 12939 return (EINVAL); 12940 } 12941 12942 static void 12943 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 12944 dof_helper_t *dofhp) 12945 { 12946 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 12947 12948 mutex_enter(&dtrace_meta_lock); 12949 mutex_enter(&dtrace_lock); 12950 12951 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 12952 /* 12953 * If the dtrace module is loaded but not attached, or if 12954 * there isn't a meta provider registered to deal with 12955 * these provider descriptions, we need to postpone creating 12956 * the actual providers until
later. 12957 */ 12958 12959 if (help->dthps_next == NULL && help->dthps_prev == NULL && 12960 dtrace_deferred_pid != help) { 12961 help->dthps_deferred = 1; 12962 help->dthps_pid = p->p_pid; 12963 help->dthps_next = dtrace_deferred_pid; 12964 help->dthps_prev = NULL; 12965 if (dtrace_deferred_pid != NULL) 12966 dtrace_deferred_pid->dthps_prev = help; 12967 dtrace_deferred_pid = help; 12968 } 12969 12970 mutex_exit(&dtrace_lock); 12971 12972 } else if (dofhp != NULL) { 12973 /* 12974 * If the dtrace module is loaded and we have a particular 12975 * helper provider description, pass that off to the 12976 * meta provider. 12977 */ 12978 12979 mutex_exit(&dtrace_lock); 12980 12981 dtrace_helper_provide(dofhp, p->p_pid); 12982 12983 } else { 12984 /* 12985 * Otherwise, just pass all the helper provider descriptions 12986 * off to the meta provider. 12987 */ 12988 12989 int i; 12990 mutex_exit(&dtrace_lock); 12991 12992 for (i = 0; i < help->dthps_nprovs; i++) { 12993 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 12994 p->p_pid); 12995 } 12996 } 12997 12998 mutex_exit(&dtrace_meta_lock); 12999 } 13000 13001 static int 13002 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 13003 { 13004 dtrace_helpers_t *help; 13005 dtrace_helper_provider_t *hprov, **tmp_provs; 13006 uint_t tmp_maxprovs, i; 13007 13008 ASSERT(MUTEX_HELD(&dtrace_lock)); 13009 13010 help = curproc->p_dtrace_helpers; 13011 ASSERT(help != NULL); 13012 13013 /* 13014 * If we already have dtrace_helper_providers_max helper providers, 13015 * we'll refuse to add a new one. 13016 */ 13017 if (help->dthps_nprovs >= dtrace_helper_providers_max) 13018 return (ENOSPC); 13019 13020 /* 13021 * Check to make sure this isn't a duplicate. 13022 */ 13023 for (i = 0; i < help->dthps_nprovs; i++) { 13024 if (dofhp->dofhp_addr == 13025 help->dthps_provs[i]->dthp_prov.dofhp_addr) 13026 return (EALREADY); 13027 } 13028 13029 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 13030 hprov->dthp_prov = *dofhp; 13031 hprov->dthp_ref = 1; 13032 hprov->dthp_generation = gen; 13033 13034 /* 13035 * Allocate a bigger table for helper providers if it's already full.
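* (The table grows by doubling -- from 0 to 2, then 4, and so on -- capped at dtrace_helper_providers_max; any old table is copied forward with bcopy() and then freed.)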
13036 */ 13037 if (help->dthps_maxprovs == help->dthps_nprovs) { 13038 tmp_maxprovs = help->dthps_maxprovs; 13039 tmp_provs = help->dthps_provs; 13040 13041 if (help->dthps_maxprovs == 0) 13042 help->dthps_maxprovs = 2; 13043 else 13044 help->dthps_maxprovs *= 2; 13045 if (help->dthps_maxprovs > dtrace_helper_providers_max) 13046 help->dthps_maxprovs = dtrace_helper_providers_max; 13047 13048 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 13049 13050 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 13051 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13052 13053 if (tmp_provs != NULL) { 13054 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 13055 sizeof (dtrace_helper_provider_t *)); 13056 kmem_free(tmp_provs, tmp_maxprovs * 13057 sizeof (dtrace_helper_provider_t *)); 13058 } 13059 } 13060 13061 help->dthps_provs[help->dthps_nprovs] = hprov; 13062 help->dthps_nprovs++; 13063 13064 return (0); 13065 } 13066 13067 static void 13068 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 13069 { 13070 mutex_enter(&dtrace_lock); 13071 13072 if (--hprov->dthp_ref == 0) { 13073 dof_hdr_t *dof; 13074 mutex_exit(&dtrace_lock); 13075 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 13076 dtrace_dof_destroy(dof); 13077 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 13078 } else { 13079 mutex_exit(&dtrace_lock); 13080 } 13081 } 13082 13083 static int 13084 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 13085 { 13086 uintptr_t daddr = (uintptr_t)dof; 13087 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 13088 dof_provider_t *provider; 13089 dof_probe_t *probe; 13090 uint8_t *arg; 13091 char *strtab, *typestr; 13092 dof_stridx_t typeidx; 13093 size_t typesz; 13094 uint_t nprobes, j, k; 13095 13096 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 13097 13098 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 13099 dtrace_dof_error(dof, "misaligned section offset"); 13100 return (-1); 13101 } 13102 13103 /* 13104 * The section needs to be large enough to contain the DOF provider 13105 * structure appropriate for the given version. 13106 */ 13107 if (sec->dofs_size < 13108 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
13109 offsetof(dof_provider_t, dofpv_prenoffs) : 13110 sizeof (dof_provider_t))) { 13111 dtrace_dof_error(dof, "provider section too small"); 13112 return (-1); 13113 } 13114 13115 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 13116 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 13117 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 13118 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 13119 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 13120 13121 if (str_sec == NULL || prb_sec == NULL || 13122 arg_sec == NULL || off_sec == NULL) 13123 return (-1); 13124 13125 enoff_sec = NULL; 13126 13127 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13128 provider->dofpv_prenoffs != DOF_SECT_NONE && 13129 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 13130 provider->dofpv_prenoffs)) == NULL) 13131 return (-1); 13132 13133 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 13134 13135 if (provider->dofpv_name >= str_sec->dofs_size || 13136 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 13137 dtrace_dof_error(dof, "invalid provider name"); 13138 return (-1); 13139 } 13140 13141 if (prb_sec->dofs_entsize == 0 || 13142 prb_sec->dofs_entsize > prb_sec->dofs_size) { 13143 dtrace_dof_error(dof, "invalid entry size"); 13144 return (-1); 13145 } 13146 13147 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 13148 dtrace_dof_error(dof, "misaligned entry size"); 13149 return (-1); 13150 } 13151 13152 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 13153 dtrace_dof_error(dof, "invalid entry size"); 13154 return (-1); 13155 } 13156 13157 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 13158 dtrace_dof_error(dof, "misaligned section offset"); 13159 return (-1); 13160 } 13161 13162 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 13163 dtrace_dof_error(dof, "invalid entry size"); 13164 return (-1); 13165 } 13166 13167 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 13168 13169 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 13170 13171 /* 13172 * Take a pass through the probes to check for errors. 13173 */ 13174 for (j = 0; j < nprobes; j++) { 13175 probe = (dof_probe_t *)(uintptr_t)(daddr + 13176 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 13177 13178 if (probe->dofpr_func >= str_sec->dofs_size) { 13179 dtrace_dof_error(dof, "invalid function name"); 13180 return (-1); 13181 } 13182 13183 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 13184 dtrace_dof_error(dof, "function name too long"); 13185 return (-1); 13186 } 13187 13188 if (probe->dofpr_name >= str_sec->dofs_size || 13189 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 13190 dtrace_dof_error(dof, "invalid probe name"); 13191 return (-1); 13192 } 13193 13194 /* 13195 * The offset count must not wrap the index, and the offsets 13196 * must also not overflow the section's data. 13197 */ 13198 if (probe->dofpr_offidx + probe->dofpr_noffs < 13199 probe->dofpr_offidx || 13200 (probe->dofpr_offidx + probe->dofpr_noffs) * 13201 off_sec->dofs_entsize > off_sec->dofs_size) { 13202 dtrace_dof_error(dof, "invalid probe offset"); 13203 return (-1); 13204 } 13205 13206 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 13207 /* 13208 * If there's no is-enabled offset section, make sure 13209 * there aren't any is-enabled offsets. Otherwise 13210 * perform the same checks as for probe offsets 13211 * (immediately above). 
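* (As with the probe offsets, the index and count come from untrusted DOF; the checks below guard both against integer wraparound -- x + n < x -- and against the scaled range running past the end of the section.)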
13212 */ 13213 if (enoff_sec == NULL) { 13214 if (probe->dofpr_enoffidx != 0 || 13215 probe->dofpr_nenoffs != 0) { 13216 dtrace_dof_error(dof, "is-enabled " 13217 "offsets with null section"); 13218 return (-1); 13219 } 13220 } else if (probe->dofpr_enoffidx + 13221 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 13222 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 13223 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 13224 dtrace_dof_error(dof, "invalid is-enabled " 13225 "offset"); 13226 return (-1); 13227 } 13228 13229 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 13230 dtrace_dof_error(dof, "zero probe and " 13231 "is-enabled offsets"); 13232 return (-1); 13233 } 13234 } else if (probe->dofpr_noffs == 0) { 13235 dtrace_dof_error(dof, "zero probe offsets"); 13236 return (-1); 13237 } 13238 13239 if (probe->dofpr_argidx + probe->dofpr_xargc < 13240 probe->dofpr_argidx || 13241 (probe->dofpr_argidx + probe->dofpr_xargc) * 13242 arg_sec->dofs_entsize > arg_sec->dofs_size) { 13243 dtrace_dof_error(dof, "invalid args"); 13244 return (-1); 13245 } 13246 13247 typeidx = probe->dofpr_nargv; 13248 typestr = strtab + probe->dofpr_nargv; 13249 for (k = 0; k < probe->dofpr_nargc; k++) { 13250 if (typeidx >= str_sec->dofs_size) { 13251 dtrace_dof_error(dof, "bad " 13252 "native argument type"); 13253 return (-1); 13254 } 13255 13256 typesz = strlen(typestr) + 1; 13257 if (typesz > DTRACE_ARGTYPELEN) { 13258 dtrace_dof_error(dof, "native " 13259 "argument type too long"); 13260 return (-1); 13261 } 13262 typeidx += typesz; 13263 typestr += typesz; 13264 } 13265 13266 typeidx = probe->dofpr_xargv; 13267 typestr = strtab + probe->dofpr_xargv; 13268 for (k = 0; k < probe->dofpr_xargc; k++) { 13269 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 13270 dtrace_dof_error(dof, "bad " 13271 "native argument index"); 13272 return (-1); 13273 } 13274 13275 if (typeidx >= str_sec->dofs_size) { 13276 dtrace_dof_error(dof, "bad " 13277 "translated argument type"); 13278 return (-1); 13279 } 13280 13281 typesz = strlen(typestr) + 1; 13282 if (typesz > DTRACE_ARGTYPELEN) { 13283 dtrace_dof_error(dof, "translated argument " 13284 "type too long"); 13285 return (-1); 13286 } 13287 13288 typeidx += typesz; 13289 typestr += typesz; 13290 } 13291 } 13292 13293 return (0); 13294 } 13295 13296 static int 13297 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 13298 { 13299 dtrace_helpers_t *help; 13300 dtrace_vstate_t *vstate; 13301 dtrace_enabling_t *enab = NULL; 13302 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 13303 uintptr_t daddr = (uintptr_t)dof; 13304 13305 ASSERT(MUTEX_HELD(&dtrace_lock)); 13306 13307 if ((help = curproc->p_dtrace_helpers) == NULL) 13308 help = dtrace_helpers_create(curproc); 13309 13310 vstate = &help->dthps_vstate; 13311 13312 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 13313 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 13314 dtrace_dof_destroy(dof); 13315 return (rv); 13316 } 13317 13318 /* 13319 * Look for helper providers and validate their descriptions. 
13320 */ 13321 if (dhp != NULL) { 13322 for (i = 0; i < dof->dofh_secnum; i++) { 13323 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 13324 dof->dofh_secoff + i * dof->dofh_secsize); 13325 13326 if (sec->dofs_type != DOF_SECT_PROVIDER) 13327 continue; 13328 13329 if (dtrace_helper_provider_validate(dof, sec) != 0) { 13330 dtrace_enabling_destroy(enab); 13331 dtrace_dof_destroy(dof); 13332 return (-1); 13333 } 13334 13335 nprovs++; 13336 } 13337 } 13338 13339 /* 13340 * Now we need to walk through the ECB descriptions in the enabling. 13341 */ 13342 for (i = 0; i < enab->dten_ndesc; i++) { 13343 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 13344 dtrace_probedesc_t *desc = &ep->dted_probe; 13345 13346 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 13347 continue; 13348 13349 if (strcmp(desc->dtpd_mod, "helper") != 0) 13350 continue; 13351 13352 if (strcmp(desc->dtpd_func, "ustack") != 0) 13353 continue; 13354 13355 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 13356 ep)) != 0) { 13357 /* 13358 * Adding this helper action failed -- we are now going 13359 * to rip out the entire generation and return failure. 13360 */ 13361 (void) dtrace_helper_destroygen(help->dthps_generation); 13362 dtrace_enabling_destroy(enab); 13363 dtrace_dof_destroy(dof); 13364 return (-1); 13365 } 13366 13367 nhelpers++; 13368 } 13369 13370 if (nhelpers < enab->dten_ndesc) 13371 dtrace_dof_error(dof, "unmatched helpers"); 13372 13373 gen = help->dthps_generation++; 13374 dtrace_enabling_destroy(enab); 13375 13376 if (dhp != NULL && nprovs > 0) { 13377 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 13378 if (dtrace_helper_provider_add(dhp, gen) == 0) { 13379 mutex_exit(&dtrace_lock); 13380 dtrace_helper_provider_register(curproc, help, dhp); 13381 mutex_enter(&dtrace_lock); 13382 13383 destroy = 0; 13384 } 13385 } 13386 13387 if (destroy) 13388 dtrace_dof_destroy(dof); 13389 13390 return (gen); 13391 } 13392 13393 static dtrace_helpers_t * 13394 dtrace_helpers_create(proc_t *p) 13395 { 13396 dtrace_helpers_t *help; 13397 13398 ASSERT(MUTEX_HELD(&dtrace_lock)); 13399 ASSERT(p->p_dtrace_helpers == NULL); 13400 13401 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 13402 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 13403 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 13404 13405 p->p_dtrace_helpers = help; 13406 dtrace_helpers++; 13407 13408 return (help); 13409 } 13410 13411 static void 13412 dtrace_helpers_destroy(void) 13413 { 13414 dtrace_helpers_t *help; 13415 dtrace_vstate_t *vstate; 13416 proc_t *p = curproc; 13417 int i; 13418 13419 mutex_enter(&dtrace_lock); 13420 13421 ASSERT(p->p_dtrace_helpers != NULL); 13422 ASSERT(dtrace_helpers > 0); 13423 13424 help = p->p_dtrace_helpers; 13425 vstate = &help->dthps_vstate; 13426 13427 /* 13428 * We're now going to lose the help from this process. 13429 */ 13430 p->p_dtrace_helpers = NULL; 13431 dtrace_sync(); 13432 13433 /* 13434 * Destroy the helper actions. 13435 */ 13436 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13437 dtrace_helper_action_t *h, *next; 13438 13439 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13440 next = h->dtha_next; 13441 dtrace_helper_action_destroy(h, vstate); 13442 h = next; 13443 } 13444 } 13445 13446 mutex_exit(&dtrace_lock); 13447 13448 /* 13449 * Destroy the helper providers.
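* There are two cases below: if a meta provider is registered, each provider is removed from it; otherwise the help structure may still be on the deferred list, from which it must be unlinked.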
13450 */ 13451 if (help->dthps_maxprovs > 0) { 13452 mutex_enter(&dtrace_meta_lock); 13453 if (dtrace_meta_pid != NULL) { 13454 ASSERT(dtrace_deferred_pid == NULL); 13455 13456 for (i = 0; i < help->dthps_nprovs; i++) { 13457 dtrace_helper_provider_remove( 13458 &help->dthps_provs[i]->dthp_prov, p->p_pid); 13459 } 13460 } else { 13461 mutex_enter(&dtrace_lock); 13462 ASSERT(help->dthps_deferred == 0 || 13463 help->dthps_next != NULL || 13464 help->dthps_prev != NULL || 13465 help == dtrace_deferred_pid); 13466 13467 /* 13468 * Remove the helper from the deferred list. 13469 */ 13470 if (help->dthps_next != NULL) 13471 help->dthps_next->dthps_prev = help->dthps_prev; 13472 if (help->dthps_prev != NULL) 13473 help->dthps_prev->dthps_next = help->dthps_next; 13474 if (dtrace_deferred_pid == help) { 13475 dtrace_deferred_pid = help->dthps_next; 13476 ASSERT(help->dthps_prev == NULL); 13477 } 13478 13479 mutex_exit(&dtrace_lock); 13480 } 13481 13482 mutex_exit(&dtrace_meta_lock); 13483 13484 for (i = 0; i < help->dthps_nprovs; i++) { 13485 dtrace_helper_provider_destroy(help->dthps_provs[i]); 13486 } 13487 13488 kmem_free(help->dthps_provs, help->dthps_maxprovs * 13489 sizeof (dtrace_helper_provider_t *)); 13490 } 13491 13492 mutex_enter(&dtrace_lock); 13493 13494 dtrace_vstate_fini(&help->dthps_vstate); 13495 kmem_free(help->dthps_actions, 13496 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 13497 kmem_free(help, sizeof (dtrace_helpers_t)); 13498 13499 --dtrace_helpers; 13500 mutex_exit(&dtrace_lock); 13501 } 13502 13503 static void 13504 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 13505 { 13506 dtrace_helpers_t *help, *newhelp; 13507 dtrace_helper_action_t *helper, *new, *last; 13508 dtrace_difo_t *dp; 13509 dtrace_vstate_t *vstate; 13510 int i, j, sz, hasprovs = 0; 13511 13512 mutex_enter(&dtrace_lock); 13513 ASSERT(from->p_dtrace_helpers != NULL); 13514 ASSERT(dtrace_helpers > 0); 13515 13516 help = from->p_dtrace_helpers; 13517 newhelp = dtrace_helpers_create(to); 13518 ASSERT(to->p_dtrace_helpers != NULL); 13519 13520 newhelp->dthps_generation = help->dthps_generation; 13521 vstate = &newhelp->dthps_vstate; 13522 13523 /* 13524 * Duplicate the helper actions. 13525 */ 13526 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13527 if ((helper = help->dthps_actions[i]) == NULL) 13528 continue; 13529 13530 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 13531 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 13532 KM_SLEEP); 13533 new->dtha_generation = helper->dtha_generation; 13534 13535 if ((dp = helper->dtha_predicate) != NULL) { 13536 dp = dtrace_difo_duplicate(dp, vstate); 13537 new->dtha_predicate = dp; 13538 } 13539 13540 new->dtha_nactions = helper->dtha_nactions; 13541 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 13542 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 13543 13544 for (j = 0; j < new->dtha_nactions; j++) { 13545 dtrace_difo_t *dp = helper->dtha_actions[j]; 13546 13547 ASSERT(dp != NULL); 13548 dp = dtrace_difo_duplicate(dp, vstate); 13549 new->dtha_actions[j] = dp; 13550 } 13551 13552 if (last != NULL) { 13553 last->dtha_next = new; 13554 } else { 13555 newhelp->dthps_actions[i] = new; 13556 } 13557 13558 last = new; 13559 } 13560 } 13561 13562 /* 13563 * Duplicate the helper providers and register them with the 13564 * DTrace framework. 
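* (Unlike the helper actions duplicated above via dtrace_difo_duplicate(), the providers themselves are shared between parent and child by bumping dthp_ref rather than being deep-copied.)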
13565 */ 13566 if (help->dthps_nprovs > 0) { 13567 newhelp->dthps_nprovs = help->dthps_nprovs; 13568 newhelp->dthps_maxprovs = help->dthps_nprovs; 13569 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 13570 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13571 for (i = 0; i < newhelp->dthps_nprovs; i++) { 13572 newhelp->dthps_provs[i] = help->dthps_provs[i]; 13573 newhelp->dthps_provs[i]->dthp_ref++; 13574 } 13575 13576 hasprovs = 1; 13577 } 13578 13579 mutex_exit(&dtrace_lock); 13580 13581 if (hasprovs) 13582 dtrace_helper_provider_register(to, newhelp, NULL); 13583 } 13584 13585 /* 13586 * DTrace Hook Functions 13587 */ 13588 static void 13589 dtrace_module_loaded(struct modctl *ctl) 13590 { 13591 dtrace_provider_t *prv; 13592 13593 mutex_enter(&dtrace_provider_lock); 13594 mutex_enter(&mod_lock); 13595 13596 ASSERT(ctl->mod_busy); 13597 13598 /* 13599 * We're going to call each provider's per-module provide operation 13600 * specifying only this module. 13601 */ 13602 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 13603 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 13604 13605 mutex_exit(&mod_lock); 13606 mutex_exit(&dtrace_provider_lock); 13607 13608 /* 13609 * If we have any retained enablings, we need to match against them. 13610 * Enabling probes requires that cpu_lock be held, and we cannot hold 13611 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 13612 * module. (In particular, this happens when loading scheduling 13613 * classes.) So if we have any retained enablings, we need to dispatch 13614 * our task queue to do the match for us. 13615 */ 13616 mutex_enter(&dtrace_lock); 13617 13618 if (dtrace_retained == NULL) { 13619 mutex_exit(&dtrace_lock); 13620 return; 13621 } 13622 13623 (void) taskq_dispatch(dtrace_taskq, 13624 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 13625 13626 mutex_exit(&dtrace_lock); 13627 13628 /* 13629 * And now, for a little heuristic sleaze: in general, we want to 13630 * match modules as soon as they load. However, we cannot guarantee 13631 * this, because it would lead us to the lock ordering violation 13632 * outlined above. The common case, of course, is that cpu_lock is 13633 * _not_ held -- so we delay here for a clock tick, hoping that that's 13634 * long enough for the task queue to do its work. If it's not, it's 13635 * not a serious problem -- it just means that the module that we 13636 * just loaded may not be immediately instrumentable. 13637 */ 13638 delay(1); 13639 } 13640 13641 static void 13642 dtrace_module_unloaded(struct modctl *ctl) 13643 { 13644 dtrace_probe_t template, *probe, *first, *next; 13645 dtrace_provider_t *prov; 13646 13647 template.dtpr_mod = ctl->mod_modname; 13648 13649 mutex_enter(&dtrace_provider_lock); 13650 mutex_enter(&mod_lock); 13651 mutex_enter(&dtrace_lock); 13652 13653 if (dtrace_bymod == NULL) { 13654 /* 13655 * The DTrace module is loaded (obviously) but not attached; 13656 * we don't have any work to do. 13657 */ 13658 mutex_exit(&dtrace_provider_lock); 13659 mutex_exit(&mod_lock); 13660 mutex_exit(&dtrace_lock); 13661 return; 13662 } 13663 13664 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 13665 probe != NULL; probe = probe->dtpr_nextmod) { 13666 if (probe->dtpr_ecb != NULL) { 13667 mutex_exit(&dtrace_provider_lock); 13668 mutex_exit(&mod_lock); 13669 mutex_exit(&dtrace_lock); 13670 13671 /* 13672 * This shouldn't _actually_ be possible -- we're 13673 * unloading a module that has an enabled probe in it.
13674 * (It's normally up to the provider to make sure that 13675 * this can't happen.) However, because dtps_enable() 13676 * doesn't have a failure mode, there can be an 13677 * enable/unload race. Upshot: we don't want to 13678 * assert, but we're not going to disable the 13679 * probe, either. 13680 */ 13681 if (dtrace_err_verbose) { 13682 cmn_err(CE_WARN, "unloaded module '%s' had " 13683 "enabled probes", ctl->mod_modname); 13684 } 13685 13686 return; 13687 } 13688 } 13689 13690 probe = first; 13691 13692 for (first = NULL; probe != NULL; probe = next) { 13693 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 13694 13695 dtrace_probes[probe->dtpr_id - 1] = NULL; 13696 13697 next = probe->dtpr_nextmod; 13698 dtrace_hash_remove(dtrace_bymod, probe); 13699 dtrace_hash_remove(dtrace_byfunc, probe); 13700 dtrace_hash_remove(dtrace_byname, probe); 13701 13702 if (first == NULL) { 13703 first = probe; 13704 probe->dtpr_nextmod = NULL; 13705 } else { 13706 probe->dtpr_nextmod = first; 13707 first = probe; 13708 } 13709 } 13710 13711 /* 13712 * We've removed all of the module's probes from the hash chains and 13713 * from the probe array. Now issue a dtrace_sync() to be sure that 13714 * everyone has cleared out from any probe array processing. 13715 */ 13716 dtrace_sync(); 13717 13718 for (probe = first; probe != NULL; probe = first) { 13719 first = probe->dtpr_nextmod; 13720 prov = probe->dtpr_provider; 13721 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 13722 probe->dtpr_arg); 13723 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 13724 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 13725 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 13726 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 13727 kmem_free(probe, sizeof (dtrace_probe_t)); 13728 } 13729 13730 mutex_exit(&dtrace_lock); 13731 mutex_exit(&mod_lock); 13732 mutex_exit(&dtrace_provider_lock); 13733 } 13734 13735 void 13736 dtrace_suspend(void) 13737 { 13738 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 13739 } 13740 13741 void 13742 dtrace_resume(void) 13743 { 13744 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 13745 } 13746 13747 static int 13748 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 13749 { 13750 ASSERT(MUTEX_HELD(&cpu_lock)); 13751 mutex_enter(&dtrace_lock); 13752 13753 switch (what) { 13754 case CPU_CONFIG: { 13755 dtrace_state_t *state; 13756 dtrace_optval_t *opt, rs, c; 13757 13758 /* 13759 * For now, we only allocate a new buffer for anonymous state. 13760 */ 13761 if ((state = dtrace_anon.dta_state) == NULL) 13762 break; 13763 13764 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13765 break; 13766 13767 opt = state->dts_options; 13768 c = opt[DTRACEOPT_CPU]; 13769 13770 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 13771 break; 13772 13773 /* 13774 * Regardless of what the actual policy is, we're going to 13775 * temporarily set our resize policy to be manual. We're 13776 * also going to temporarily set our CPU option to denote 13777 * the newly configured CPU. 13778 */ 13779 rs = opt[DTRACEOPT_BUFRESIZE]; 13780 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 13781 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 13782 13783 (void) dtrace_state_buffers(state); 13784 13785 opt[DTRACEOPT_BUFRESIZE] = rs; 13786 opt[DTRACEOPT_CPU] = c; 13787 13788 break; 13789 } 13790 13791 case CPU_UNCONFIG: 13792 /* 13793 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 13794 * buffer will be freed when the consumer exits.) 13795 */ 13796 break; 13797 13798 default: 13799 break; 13800 } 13801 13802 mutex_exit(&dtrace_lock); 13803 return (0); 13804 } 13805 13806 static void 13807 dtrace_cpu_setup_initial(processorid_t cpu) 13808 { 13809 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 13810 } 13811 13812 static void 13813 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 13814 { 13815 if (dtrace_toxranges >= dtrace_toxranges_max) { 13816 int osize, nsize; 13817 dtrace_toxrange_t *range; 13818 13819 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13820 13821 if (osize == 0) { 13822 ASSERT(dtrace_toxrange == NULL); 13823 ASSERT(dtrace_toxranges_max == 0); 13824 dtrace_toxranges_max = 1; 13825 } else { 13826 dtrace_toxranges_max <<= 1; 13827 } 13828 13829 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13830 range = kmem_zalloc(nsize, KM_SLEEP); 13831 13832 if (dtrace_toxrange != NULL) { 13833 ASSERT(osize != 0); 13834 bcopy(dtrace_toxrange, range, osize); 13835 kmem_free(dtrace_toxrange, osize); 13836 } 13837 13838 dtrace_toxrange = range; 13839 } 13840 13841 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 13842 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 13843 13844 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 13845 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 13846 dtrace_toxranges++; 13847 } 13848 13849 /* 13850 * DTrace Driver Cookbook Functions 13851 */ 13852 /*ARGSUSED*/ 13853 static int 13854 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 13855 { 13856 dtrace_provider_id_t id; 13857 dtrace_state_t *state = NULL; 13858 dtrace_enabling_t *enab; 13859 13860 mutex_enter(&cpu_lock); 13861 mutex_enter(&dtrace_provider_lock); 13862 mutex_enter(&dtrace_lock); 13863 13864 if (ddi_soft_state_init(&dtrace_softstate, 13865 sizeof (dtrace_state_t), 0) != 0) { 13866 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 13867 mutex_exit(&cpu_lock); 13868 mutex_exit(&dtrace_provider_lock); 13869 mutex_exit(&dtrace_lock); 13870 return (DDI_FAILURE); 13871 } 13872 13873 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 13874 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 13875 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 13876 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 13877 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 13878 ddi_remove_minor_node(devi, NULL); 13879 ddi_soft_state_fini(&dtrace_softstate); 13880 mutex_exit(&cpu_lock); 13881 mutex_exit(&dtrace_provider_lock); 13882 mutex_exit(&dtrace_lock); 13883 return (DDI_FAILURE); 13884 } 13885 13886 ddi_report_dev(devi); 13887 dtrace_devi = devi; 13888 13889 dtrace_modload = dtrace_module_loaded; 13890 dtrace_modunload = dtrace_module_unloaded; 13891 dtrace_cpu_init = dtrace_cpu_setup_initial; 13892 dtrace_helpers_cleanup = dtrace_helpers_destroy; 13893 dtrace_helpers_fork = dtrace_helpers_duplicate; 13894 dtrace_cpustart_init = dtrace_suspend; 13895 dtrace_cpustart_fini = dtrace_resume; 13896 dtrace_debugger_init = dtrace_suspend; 13897 dtrace_debugger_fini = dtrace_resume; 13898 dtrace_kreloc_init = dtrace_suspend; 13899 dtrace_kreloc_fini = dtrace_resume; 13900 13901 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13902 13903 ASSERT(MUTEX_HELD(&cpu_lock)); 13904 13905 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 13906 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13907 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 
13908 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 13909 VM_SLEEP | VMC_IDENTIFIER); 13910 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 13911 1, INT_MAX, 0); 13912 13913 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 13914 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 13915 NULL, NULL, NULL, NULL, NULL, 0); 13916 13917 ASSERT(MUTEX_HELD(&cpu_lock)); 13918 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 13919 offsetof(dtrace_probe_t, dtpr_nextmod), 13920 offsetof(dtrace_probe_t, dtpr_prevmod)); 13921 13922 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 13923 offsetof(dtrace_probe_t, dtpr_nextfunc), 13924 offsetof(dtrace_probe_t, dtpr_prevfunc)); 13925 13926 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 13927 offsetof(dtrace_probe_t, dtpr_nextname), 13928 offsetof(dtrace_probe_t, dtpr_prevname)); 13929 13930 if (dtrace_retain_max < 1) { 13931 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 13932 "setting to 1", dtrace_retain_max); 13933 dtrace_retain_max = 1; 13934 } 13935 13936 /* 13937 * Now discover our toxic ranges. 13938 */ 13939 dtrace_toxic_ranges(dtrace_toxrange_add); 13940 13941 /* 13942 * Before we register ourselves as a provider to our own framework, 13943 * we would like to assert that dtrace_provider is NULL -- but that's 13944 * not true if we were loaded as a dependency of a DTrace provider. 13945 * Once we've registered, we can assert that dtrace_provider is our 13946 * pseudo provider. 13947 */ 13948 (void) dtrace_register("dtrace", &dtrace_provider_attr, 13949 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 13950 13951 ASSERT(dtrace_provider != NULL); 13952 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 13953 13954 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 13955 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 13956 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 13957 dtrace_provider, NULL, NULL, "END", 0, NULL); 13958 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 13959 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 13960 13961 dtrace_anon_property(); 13962 mutex_exit(&cpu_lock); 13963 13964 /* 13965 * If DTrace helper tracing is enabled, we need to allocate the 13966 * trace buffer and initialize the values. 13967 */ 13968 if (dtrace_helptrace_enabled) { 13969 ASSERT(dtrace_helptrace_buffer == NULL); 13970 dtrace_helptrace_buffer = 13971 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 13972 dtrace_helptrace_next = 0; 13973 } 13974 13975 /* 13976 * If there are already providers, we must ask them to provide their 13977 * probes, and then match any anonymous enabling against them. Note 13978 * that there should be no other retained enablings at this time: 13979 * the only retained enablings at this time should be the anonymous 13980 * enabling. 13981 */ 13982 if (dtrace_anon.dta_enabling != NULL) { 13983 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 13984 13985 dtrace_enabling_provide(NULL); 13986 state = dtrace_anon.dta_state; 13987 13988 /* 13989 * We couldn't hold cpu_lock across the above call to 13990 * dtrace_enabling_provide(), but we must hold it to actually 13991 * enable the probes. We have to drop all of our locks, pick 13992 * up cpu_lock, and regain our locks before matching the 13993 * retained anonymous enabling. 
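* (The enabling is deliberately re-fetched after the locks are reacquired; it could conceivably have been torn down during the window in which we held no locks.)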
13994 */ 13995 mutex_exit(&dtrace_lock); 13996 mutex_exit(&dtrace_provider_lock); 13997 13998 mutex_enter(&cpu_lock); 13999 mutex_enter(&dtrace_provider_lock); 14000 mutex_enter(&dtrace_lock); 14001 14002 if ((enab = dtrace_anon.dta_enabling) != NULL) 14003 (void) dtrace_enabling_match(enab, NULL); 14004 14005 mutex_exit(&cpu_lock); 14006 } 14007 14008 mutex_exit(&dtrace_lock); 14009 mutex_exit(&dtrace_provider_lock); 14010 14011 if (state != NULL) { 14012 /* 14013 * If we created any anonymous state, set it going now. 14014 */ 14015 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 14016 } 14017 14018 return (DDI_SUCCESS); 14019 } 14020 14021 /*ARGSUSED*/ 14022 static int 14023 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 14024 { 14025 dtrace_state_t *state; 14026 uint32_t priv; 14027 uid_t uid; 14028 zoneid_t zoneid; 14029 14030 if (getminor(*devp) == DTRACEMNRN_HELPER) 14031 return (0); 14032 14033 /* 14034 * If this wasn't an open with the "helper" minor, then it must be 14035 * the "dtrace" minor. 14036 */ 14037 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 14038 14039 /* 14040 * If no DTRACE_PRIV_* bits are set in the credential, then the 14041 * caller lacks sufficient permission to do anything with DTrace. 14042 */ 14043 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 14044 if (priv == DTRACE_PRIV_NONE) 14045 return (EACCES); 14046 14047 /* 14048 * Ask all providers to provide all their probes. 14049 */ 14050 mutex_enter(&dtrace_provider_lock); 14051 dtrace_probe_provide(NULL, NULL); 14052 mutex_exit(&dtrace_provider_lock); 14053 14054 mutex_enter(&cpu_lock); 14055 mutex_enter(&dtrace_lock); 14056 dtrace_opens++; 14057 dtrace_membar_producer(); 14058 14059 /* 14060 * If the kernel debugger is active (that is, if the kernel debugger 14061 * modified text in some way), we won't allow the open. 14062 */ 14063 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14064 dtrace_opens--; 14065 mutex_exit(&cpu_lock); 14066 mutex_exit(&dtrace_lock); 14067 return (EBUSY); 14068 } 14069 14070 state = dtrace_state_create(devp, cred_p); 14071 mutex_exit(&cpu_lock); 14072 14073 if (state == NULL) { 14074 if (--dtrace_opens == 0) 14075 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14076 mutex_exit(&dtrace_lock); 14077 return (EAGAIN); 14078 } 14079 14080 mutex_exit(&dtrace_lock); 14081 14082 return (0); 14083 } 14084 14085 /*ARGSUSED*/ 14086 static int 14087 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 14088 { 14089 minor_t minor = getminor(dev); 14090 dtrace_state_t *state; 14091 14092 if (minor == DTRACEMNRN_HELPER) 14093 return (0); 14094 14095 state = ddi_get_soft_state(dtrace_softstate, minor); 14096 14097 mutex_enter(&cpu_lock); 14098 mutex_enter(&dtrace_lock); 14099 14100 if (state->dts_anon) { 14101 /* 14102 * There is anonymous state. Destroy that first. 
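* (If this consumer grabbed the anonymous state in dtrace_state_go(), dtrace_anon_grab() will have cleared dtrace_anon.dta_state -- hence the ASSERT below.)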
/*ARGSUSED*/
static int
dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
{
	int rval;
	dof_helper_t help, *dhp = NULL;

	switch (cmd) {
	case DTRACEHIOC_ADDDOF:
		if (copyin((void *)arg, &help, sizeof (help)) != 0) {
			dtrace_dof_error(NULL, "failed to copyin DOF helper");
			return (EFAULT);
		}

		dhp = &help;
		arg = (intptr_t)help.dofhp_dof;
		/*FALLTHROUGH*/

	case DTRACEHIOC_ADD: {
		dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);

		if (dof == NULL)
			return (rval);

		mutex_enter(&dtrace_lock);

		/*
		 * dtrace_helper_slurp() takes responsibility for the dof --
		 * it may free it now or it may save it and free it later.
		 */
		if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
			*rv = rval;
			rval = 0;
		} else {
			rval = EINVAL;
		}

		mutex_exit(&dtrace_lock);
		return (rval);
	}

	case DTRACEHIOC_REMOVE: {
		mutex_enter(&dtrace_lock);
		rval = dtrace_helper_destroygen(arg);
		mutex_exit(&dtrace_lock);

		return (rval);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
	minor_t minor = getminor(dev);
	dtrace_state_t *state;
	int rval;

	if (minor == DTRACEMNRN_HELPER)
		return (dtrace_ioctl_helper(cmd, arg, rv));

	state = ddi_get_soft_state(dtrace_softstate, minor);

	if (state->dts_anon) {
		ASSERT(dtrace_anon.dta_state == NULL);
		state = state->dts_anon;
	}

	switch (cmd) {
	case DTRACEIOC_PROVIDER: {
		dtrace_providerdesc_t pvd;
		dtrace_provider_t *pvp;

		if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
			return (EFAULT);

		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
		mutex_enter(&dtrace_provider_lock);

		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
			if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
				break;
		}

		mutex_exit(&dtrace_provider_lock);

		if (pvp == NULL)
			return (ESRCH);

		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
		if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
			return (EFAULT);

		return (0);
	}
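	/*
	 * The next two cases share a sizing protocol:  the consumer
	 * declares how many record descriptions its buffer can hold
	 * (dtepd_nrecs or dtagd_nrecs), and the kernel rewrites that
	 * field to the true record count while copying out at most the
	 * declared number of records.  A consumer that guessed too low
	 * can detect this and retry; hypothetically (user-level sketch,
	 * fd and nrecs assumed):
	 *
	 *	desc->dtepd_nrecs = nrecs;
	 *	if (ioctl(fd, DTRACEIOC_EPROBE, desc) == 0 &&
	 *	    desc->dtepd_nrecs > nrecs) {
	 *		// grow desc to hold dtepd_nrecs records and retry
	 *	}
	 */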
	case DTRACEIOC_EPROBE: {
		dtrace_eprobedesc_t epdesc;
		dtrace_ecb_t *ecb;
		dtrace_action_t *act;
		void *buf;
		size_t size;
		uintptr_t dest;
		int nrecs;

		if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		if (ecb->dte_probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
		epdesc.dtepd_uarg = ecb->dte_uarg;
		epdesc.dtepd_size = ecb->dte_size;

		nrecs = epdesc.dtepd_nrecs;
		epdesc.dtepd_nrecs = 0;
		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
				continue;

			epdesc.dtepd_nrecs++;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_eprobedesc_t) +
		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);

		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
				continue;

			if (nrecs-- == 0)
				break;

			bcopy(&act->dta_rec, (void *)dest,
			    sizeof (dtrace_recdesc_t));
			dest += sizeof (dtrace_recdesc_t);
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}
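	/*
	 * DTRACEIOC_AGGDESC mirrors the EPROBE case above, with one
	 * wrinkle:  record offsets are rebased to be relative to the
	 * start of the aggregation (dtag_base) rather than to the
	 * underlying ECB, matching the layout of the aggregation
	 * buffer that the consumer will snapshot.
	 */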
	case DTRACEIOC_AGGDESC: {
		dtrace_aggdesc_t aggdesc;
		dtrace_action_t *act;
		dtrace_aggregation_t *agg;
		int nrecs;
		uint32_t offs;
		dtrace_recdesc_t *lrec;
		void *buf;
		size_t size;
		uintptr_t dest;

		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;

		nrecs = aggdesc.dtagd_nrecs;
		aggdesc.dtagd_nrecs = 0;

		offs = agg->dtag_base;
		lrec = &agg->dtag_action.dta_rec;
		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;

		for (act = agg->dtag_first; ; act = act->dta_next) {
			ASSERT(act->dta_intuple ||
			    DTRACEACT_ISAGG(act->dta_kind));

			/*
			 * If this action has a record size of zero, it
			 * denotes an argument to the aggregating action.
			 * Because the presence of this record doesn't (or
			 * shouldn't) affect the way the data is interpreted,
			 * we don't copy it out -- sparing user level the
			 * confusion of dealing with a zero-length record.
			 */
			if (act->dta_rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			aggdesc.dtagd_nrecs++;

			if (act == &agg->dtag_action)
				break;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_aggdesc_t) +
		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);

		for (act = agg->dtag_first; ; act = act->dta_next) {
			dtrace_recdesc_t rec = act->dta_rec;

			/*
			 * See the comment in the above loop for why we pass
			 * over zero-length records.
			 */
			if (rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			if (nrecs-- == 0)
				break;

			rec.dtrd_offset -= offs;
			bcopy(&rec, (void *)dest, sizeof (rec));
			dest += sizeof (dtrace_recdesc_t);

			if (act == &agg->dtag_action)
				break;
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_ENABLE: {
		dof_hdr_t *dof;
		dtrace_enabling_t *enab = NULL;
		dtrace_vstate_t *vstate;
		int err = 0;

		*rv = 0;

		/*
		 * If a NULL argument has been passed, we take this as our
		 * cue to reevaluate our enablings.
		 */
		if (arg == NULL) {
			mutex_enter(&cpu_lock);
			mutex_enter(&dtrace_lock);
			err = dtrace_enabling_matchstate(state, rv);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);

			return (err);
		}

		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
			return (rval);

		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_lock);
		vstate = &state->dts_vstate;

		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EBUSY);
		}

		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EINVAL);
		}

		if ((rval = dtrace_dof_options(dof, state)) != 0) {
			dtrace_enabling_destroy(enab);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (rval);
		}

		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
			err = dtrace_enabling_retain(enab);
		} else {
			dtrace_enabling_destroy(enab);
		}

		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		dtrace_dof_destroy(dof);

		return (err);
	}

	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}
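	/*
	 * Probe listing is stateless on the kernel side:  each call
	 * returns the first probe with an id at or above dtpd_id that
	 * the caller's privileges permit it to see (and, for
	 * DTRACEIOC_PROBEMATCH, that also matches the description).  A
	 * consumer can therefore walk the entire probe table with a
	 * loop of roughly this shape (illustrative only; fd and
	 * process() are hypothetical):
	 *
	 *	bzero(&desc, sizeof (desc));
	 *	for (desc.dtpd_id = 0;
	 *	    ioctl(fd, DTRACEIOC_PROBES, &desc) == 0;
	 *	    desc.dtpd_id++)
	 *		process(&desc);
	 *
	 * ESRCH denotes that no probe at or above dtpd_id qualified.
	 */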
	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
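	/*
	 * The cases above describe probes; those below drive a tracing
	 * session.  For orientation, the expected consumer lifecycle is
	 * sketched here (hypothetical user-level pseudo-sequence; error
	 * handling and the "done" condition are elided):
	 *
	 *	(void) ioctl(fd, DTRACEIOC_ENABLE, dof);   // install ECBs
	 *	(void) ioctl(fd, DTRACEIOC_GO, &cpu);      // start tracing
	 *	while (!done) {
	 *		(void) ioctl(fd, DTRACEIOC_STATUS, &stat);
	 *		(void) ioctl(fd, DTRACEIOC_BUFSNAP, &bdesc);
	 *		// ... consume the snapshot, once per CPU ...
	 *	}
	 *	(void) ioctl(fd, DTRACEIOC_STOP, &cpu);
	 */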
	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}
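	/*
	 * For switching buffers, the snapshot below is taken by cross
	 * calling to the target CPU to atomically exchange the active
	 * (dtb_tomax) and inactive (dtb_xamot) buffers; the now-inactive
	 * buffer can then be copied out at leisure while tracing
	 * continues into its counterpart.  Ring and fill buffers are
	 * never switched, and may only be consumed once tracing has
	 * stopped.
	 */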
	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}
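	/*
	 * DTRACEIOC_STATUS doubles as the consumer's keep-alive:  the
	 * deadman relies on the refresh of dts_laststatus performed
	 * below to conclude that the consumer is alive and consuming.
	 * A consumer that stops requesting status will eventually have
	 * its state killed by the deadman.
	 */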
	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
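	/*
	 * Format strings are fetched with a two-call protocol:  if the
	 * consumer's declared dtfd_length is too small, the call below
	 * nonetheless succeeds, rewriting dtfd_length with the required
	 * size (terminating NUL included) so that the consumer can
	 * allocate and retry.  Hypothetical user-level usage (error
	 * handling elided):
	 *
	 *	fmt.dtfd_format = ndx;		// one-based format index
	 *	fmt.dtfd_length = 0;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);  // learn length
	 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);  // fetch string
	 */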
	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
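/*
 * Note the preconditions enforced below:  detach is refused outright if
 * any helpers remain, or if our own pseudo provider cannot be
 * unregistered (as will be the case if any of its probes are enabled);
 * and by the time we are actually called with DDI_DETACH, no opens of
 * the "dtrace" minor may remain.
 */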
/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_kreloc_init = NULL;
	dtrace_kreloc_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}