1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, Joyent, Inc. All rights reserved. 25 */ 26 27 /* 28 * DTrace - Dynamic Tracing for Solaris 29 * 30 * This is the implementation of the Solaris Dynamic Tracing framework 31 * (DTrace). The user-visible interface to DTrace is described at length in 32 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 33 * library, the in-kernel DTrace framework, and the DTrace providers are 34 * described in the block comments in the <sys/dtrace.h> header file. The 35 * internal architecture of DTrace is described in the block comments in the 36 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 37 * implementation very much assume mastery of all of these sources; if one has 38 * an unanswered question about the implementation, one should consult them 39 * first. 40 * 41 * The functions here are ordered roughly as follows: 42 * 43 * - Probe context functions 44 * - Probe hashing functions 45 * - Non-probe context utility functions 46 * - Matching functions 47 * - Provider-to-Framework API functions 48 * - Probe management functions 49 * - DIF object functions 50 * - Format functions 51 * - Predicate functions 52 * - ECB functions 53 * - Buffer functions 54 * - Enabling functions 55 * - DOF functions 56 * - Anonymous enabling functions 57 * - Consumer state functions 58 * - Helper functions 59 * - Hook functions 60 * - Driver cookbook functions 61 * 62 * Each group of functions begins with a block comment labelled the "DTrace 63 * [Group] Functions", allowing one to find each block by searching forward 64 * on capital-f functions. 65 */ 66 #include <sys/errno.h> 67 #include <sys/stat.h> 68 #include <sys/modctl.h> 69 #include <sys/conf.h> 70 #include <sys/systm.h> 71 #include <sys/ddi.h> 72 #include <sys/sunddi.h> 73 #include <sys/cpuvar.h> 74 #include <sys/kmem.h> 75 #include <sys/strsubr.h> 76 #include <sys/sysmacros.h> 77 #include <sys/dtrace_impl.h> 78 #include <sys/atomic.h> 79 #include <sys/cmn_err.h> 80 #include <sys/mutex_impl.h> 81 #include <sys/rwlock_impl.h> 82 #include <sys/ctf_api.h> 83 #include <sys/panic.h> 84 #include <sys/priv_impl.h> 85 #include <sys/policy.h> 86 #include <sys/cred_impl.h> 87 #include <sys/procfs_isa.h> 88 #include <sys/taskq.h> 89 #include <sys/mkdev.h> 90 #include <sys/kdi.h> 91 #include <sys/zone.h> 92 #include <sys/socket.h> 93 #include <netinet/in.h> 94 95 /* 96 * DTrace Tunable Variables 97 * 98 * The following variables may be tuned by adding a line to /etc/system that 99 * includes both the name of the DTrace module ("dtrace") and the name of the 100 * variable. 
For example: 101 * 102 * set dtrace:dtrace_destructive_disallow = 1 103 * 104 * In general, the only variables that one should be tuning this way are those 105 * that affect system-wide DTrace behavior, and for which the default behavior 106 * is undesirable. Most of these variables are tunable on a per-consumer 107 * basis using DTrace options, and need not be tuned on a system-wide basis. 108 * When tuning these variables, avoid pathological values; while some attempt 109 * is made to verify the integrity of these variables, they are not considered 110 * part of the supported interface to DTrace, and they are therefore not 111 * checked comprehensively. Further, these variables should not be tuned 112 * dynamically via "mdb -kw" or other means; they should only be tuned via 113 * /etc/system. 114 */ 115 int dtrace_destructive_disallow = 0; 116 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 117 size_t dtrace_difo_maxsize = (256 * 1024); 118 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 119 size_t dtrace_global_maxsize = (16 * 1024); 120 size_t dtrace_actions_max = (16 * 1024); 121 size_t dtrace_retain_max = 1024; 122 dtrace_optval_t dtrace_helper_actions_max = 32; 123 dtrace_optval_t dtrace_helper_providers_max = 32; 124 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 125 size_t dtrace_strsize_default = 256; 126 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 127 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 128 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 129 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 130 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 131 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 132 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 133 dtrace_optval_t dtrace_nspec_default = 1; 134 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 135 dtrace_optval_t dtrace_stackframes_default = 20; 136 dtrace_optval_t dtrace_ustackframes_default = 20; 137 dtrace_optval_t dtrace_jstackframes_default = 50; 138 dtrace_optval_t dtrace_jstackstrsize_default = 512; 139 int dtrace_msgdsize_max = 128; 140 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 141 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 142 int dtrace_devdepth_max = 32; 143 int dtrace_err_verbose; 144 hrtime_t dtrace_deadman_interval = NANOSEC; 145 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 146 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 147 148 /* 149 * DTrace External Variables 150 * 151 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 152 * available to DTrace consumers via the backtick (`) syntax. One of these, 153 * dtrace_zero, is made deliberately so: it is provided as a source of 154 * well-known, zero-filled memory. While this variable is not documented, 155 * it is used by some translators as an implementation detail. 
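 * (As a purely illustrative, hedged example: a D clause could read this array
 * through the backtick syntax, e.g. `dtrace_zero[0], and would simply observe
 * a zero byte.)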
156 */ 157 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 158 159 /* 160 * DTrace Internal Variables 161 */ 162 static dev_info_t *dtrace_devi; /* device info */ 163 static vmem_t *dtrace_arena; /* probe ID arena */ 164 static vmem_t *dtrace_minor; /* minor number arena */ 165 static taskq_t *dtrace_taskq; /* task queue */ 166 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 167 static int dtrace_nprobes; /* number of probes */ 168 static dtrace_provider_t *dtrace_provider; /* provider list */ 169 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 170 static int dtrace_opens; /* number of opens */ 171 static int dtrace_helpers; /* number of helpers */ 172 static void *dtrace_softstate; /* softstate pointer */ 173 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 174 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 175 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 176 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 177 static int dtrace_toxranges; /* number of toxic ranges */ 178 static int dtrace_toxranges_max; /* size of toxic range array */ 179 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 180 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 181 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 182 static kthread_t *dtrace_panicked; /* panicking thread */ 183 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 184 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 185 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 186 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 187 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 188 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 189 static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 190 191 /* 192 * DTrace Locking 193 * DTrace is protected by three (relatively coarse-grained) locks: 194 * 195 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 196 * including enabling state, probes, ECBs, consumer state, helper state, 197 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 198 * probe context is lock-free -- synchronization is handled via the 199 * dtrace_sync() cross call mechanism. 200 * 201 * (2) dtrace_provider_lock is required when manipulating provider state, or 202 * when provider state must be held constant. 203 * 204 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 205 * when meta provider state must be held constant. 206 * 207 * The lock ordering between these three locks is dtrace_meta_lock before 208 * dtrace_provider_lock before dtrace_lock. (In particular, there are 209 * several places where dtrace_provider_lock is held by the framework as it 210 * calls into the providers -- which then call back into the framework, 211 * grabbing dtrace_lock.) 212 * 213 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 214 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 215 * role as a coarse-grained lock; it is acquired before both of these locks. 216 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 217 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock. (The overall
 * acquisition order thus works out to dtrace_meta_lock, then cpu_lock, then
 * dtrace_provider_lock, then mod_lock, and finally dtrace_lock.)
 */
static kmutex_t	dtrace_lock;		/* probe state lock */
static kmutex_t	dtrace_provider_lock;	/* provider state lock */
static kmutex_t	dtrace_meta_lock;	/* meta-provider state lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static int
dtrace_enable_nullop(void)
{
	return (0);
}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table. This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation. The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation. There is no real structure to this cpp
 * mishmash -- but is there ever?
303 */ 304 #define DTRACE_HASHSTR(hash, probe) \ 305 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 306 307 #define DTRACE_HASHNEXT(hash, probe) \ 308 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 309 310 #define DTRACE_HASHPREV(hash, probe) \ 311 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 312 313 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 314 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 315 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 316 317 #define DTRACE_AGGHASHSIZE_SLEW 17 318 319 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 320 321 /* 322 * The key for a thread-local variable consists of the lower 61 bits of the 323 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 324 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 325 * equal to a variable identifier. This is necessary (but not sufficient) to 326 * assure that global associative arrays never collide with thread-local 327 * variables. To guarantee that they cannot collide, we must also define the 328 * order for keying dynamic variables. That order is: 329 * 330 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 331 * 332 * Because the variable-key and the tls-key are in orthogonal spaces, there is 333 * no way for a global variable key signature to match a thread-local key 334 * signature. 335 */ 336 #define DTRACE_TLS_THRKEY(where) { \ 337 uint_t intr = 0; \ 338 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 339 for (; actv; actv >>= 1) \ 340 intr++; \ 341 ASSERT(intr < (1 << 3)); \ 342 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 343 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 344 } 345 346 #define DT_BSWAP_8(x) ((x) & 0xff) 347 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 348 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 349 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 350 351 #define DT_MASK_LO 0x00000000FFFFFFFFULL 352 353 #define DTRACE_STORE(type, tomax, offset, what) \ 354 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 355 356 #ifndef __i386 357 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 358 if (addr & (size - 1)) { \ 359 *flags |= CPU_DTRACE_BADALIGN; \ 360 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 361 return (0); \ 362 } 363 #else 364 #define DTRACE_ALIGNCHECK(addr, size, flags) 365 #endif 366 367 /* 368 * Test whether a range of memory starting at testaddr of size testsz falls 369 * within the range of memory described by addr, sz. We take care to avoid 370 * problems with overflow and underflow of the unsigned quantities, and 371 * disallow all negative sizes. Ranges of size 0 are allowed. 372 */ 373 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 374 ((testaddr) - (baseaddr) < (basesz) && \ 375 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \ 376 (testaddr) + (testsz) >= (testaddr)) 377 378 /* 379 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 380 * alloc_sz on the righthand side of the comparison in order to avoid overflow 381 * or underflow in the comparison with it. This is simpler than the INRANGE 382 * check above, because we know that the dtms_scratch_ptr is valid in the 383 * range. Allocations of size zero are allowed. 
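 * (A hedged worked example with made-up values: if dtms_scratch_base is
 * 0x1000, dtms_scratch_size is 0x100 and dtms_scratch_ptr is 0x1080, the
 * expression below leaves 0x80 bytes, so an alloc_sz of 0x80 still fits
 * while 0x81 does not.)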
384 */ 385 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 386 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 387 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 388 389 #define DTRACE_LOADFUNC(bits) \ 390 /*CSTYLED*/ \ 391 uint##bits##_t \ 392 dtrace_load##bits(uintptr_t addr) \ 393 { \ 394 size_t size = bits / NBBY; \ 395 /*CSTYLED*/ \ 396 uint##bits##_t rval; \ 397 int i; \ 398 volatile uint16_t *flags = (volatile uint16_t *) \ 399 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ 400 \ 401 DTRACE_ALIGNCHECK(addr, size, flags); \ 402 \ 403 for (i = 0; i < dtrace_toxranges; i++) { \ 404 if (addr >= dtrace_toxrange[i].dtt_limit) \ 405 continue; \ 406 \ 407 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 408 continue; \ 409 \ 410 /* \ 411 * This address falls within a toxic region; return 0. \ 412 */ \ 413 *flags |= CPU_DTRACE_BADADDR; \ 414 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 415 return (0); \ 416 } \ 417 \ 418 *flags |= CPU_DTRACE_NOFAULT; \ 419 /*CSTYLED*/ \ 420 rval = *((volatile uint##bits##_t *)addr); \ 421 *flags &= ~CPU_DTRACE_NOFAULT; \ 422 \ 423 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 424 } 425 426 #ifdef _LP64 427 #define dtrace_loadptr dtrace_load64 428 #else 429 #define dtrace_loadptr dtrace_load32 430 #endif 431 432 #define DTRACE_DYNHASH_FREE 0 433 #define DTRACE_DYNHASH_SINK 1 434 #define DTRACE_DYNHASH_VALID 2 435 436 #define DTRACE_MATCH_FAIL -1 437 #define DTRACE_MATCH_NEXT 0 438 #define DTRACE_MATCH_DONE 1 439 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 440 #define DTRACE_STATE_ALIGN 64 441 442 #define DTRACE_FLAGS2FLT(flags) \ 443 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 444 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 445 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 446 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 447 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 448 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 449 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 450 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 451 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 452 DTRACEFLT_UNKNOWN) 453 454 #define DTRACEACT_ISSTRING(act) \ 455 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 456 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 457 458 static size_t dtrace_strlen(const char *, size_t); 459 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 460 static void dtrace_enabling_provide(dtrace_provider_t *); 461 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 462 static void dtrace_enabling_matchall(void); 463 static dtrace_state_t *dtrace_anon_grab(void); 464 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 465 dtrace_state_t *, uint64_t, uint64_t); 466 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 467 static void dtrace_buffer_drop(dtrace_buffer_t *); 468 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 469 dtrace_state_t *, dtrace_mstate_t *); 470 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 471 dtrace_optval_t); 472 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 473 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 474 475 /* 476 * DTrace Probe Context Functions 477 * 478 * These functions are called from probe context. Because probe context is 479 * any context in which C may be called, arbitrarily locks may be held, 480 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 
481 * As a result, functions called from probe context may only call other DTrace 482 * support functions -- they may not interact at all with the system at large. 483 * (Note that the ASSERT macro is made probe-context safe by redefining it in 484 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 485 * loads are to be performed from probe context, they _must_ be in terms of 486 * the safe dtrace_load*() variants. 487 * 488 * Some functions in this block are not actually called from probe context; 489 * for these functions, there will be a comment above the function reading 490 * "Note: not called from probe context." 491 */ 492 void 493 dtrace_panic(const char *format, ...) 494 { 495 va_list alist; 496 497 va_start(alist, format); 498 dtrace_vpanic(format, alist); 499 va_end(alist); 500 } 501 502 int 503 dtrace_assfail(const char *a, const char *f, int l) 504 { 505 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 506 507 /* 508 * We just need something here that even the most clever compiler 509 * cannot optimize away. 510 */ 511 return (a[(uintptr_t)f]); 512 } 513 514 /* 515 * Atomically increment a specified error counter from probe context. 516 */ 517 static void 518 dtrace_error(uint32_t *counter) 519 { 520 /* 521 * Most counters stored to in probe context are per-CPU counters. 522 * However, there are some error conditions that are sufficiently 523 * arcane that they don't merit per-CPU storage. If these counters 524 * are incremented concurrently on different CPUs, scalability will be 525 * adversely affected -- but we don't expect them to be white-hot in a 526 * correctly constructed enabling... 527 */ 528 uint32_t oval, nval; 529 530 do { 531 oval = *counter; 532 533 if ((nval = oval + 1) == 0) { 534 /* 535 * If the counter would wrap, set it to 1 -- assuring 536 * that the counter is never zero when we have seen 537 * errors. (The counter must be 32-bits because we 538 * aren't guaranteed a 64-bit compare&swap operation.) 539 * To save this code both the infamy of being fingered 540 * by a priggish news story and the indignity of being 541 * the target of a neo-puritan witch trial, we're 542 * carefully avoiding any colorful description of the 543 * likelihood of this condition -- but suffice it to 544 * say that it is only slightly more likely than the 545 * overflow of predicate cache IDs, as discussed in 546 * dtrace_predicate_create(). 547 */ 548 nval = 1; 549 } 550 } while (dtrace_cas32(counter, oval, nval) != oval); 551 } 552 553 /* 554 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 555 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 
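 * For example, DTRACE_LOADFUNC(8) below expands to the definition of
 * uint8_t dtrace_load8(uintptr_t addr), which performs the alignment and
 * toxic-range checks before dereferencing addr with CPU_DTRACE_NOFAULT set.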
556 */ 557 DTRACE_LOADFUNC(8) 558 DTRACE_LOADFUNC(16) 559 DTRACE_LOADFUNC(32) 560 DTRACE_LOADFUNC(64) 561 562 static int 563 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 564 { 565 if (dest < mstate->dtms_scratch_base) 566 return (0); 567 568 if (dest + size < dest) 569 return (0); 570 571 if (dest + size > mstate->dtms_scratch_ptr) 572 return (0); 573 574 return (1); 575 } 576 577 static int 578 dtrace_canstore_statvar(uint64_t addr, size_t sz, 579 dtrace_statvar_t **svars, int nsvars) 580 { 581 int i; 582 583 for (i = 0; i < nsvars; i++) { 584 dtrace_statvar_t *svar = svars[i]; 585 586 if (svar == NULL || svar->dtsv_size == 0) 587 continue; 588 589 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 590 return (1); 591 } 592 593 return (0); 594 } 595 596 /* 597 * Check to see if the address is within a memory region to which a store may 598 * be issued. This includes the DTrace scratch areas, and any DTrace variable 599 * region. The caller of dtrace_canstore() is responsible for performing any 600 * alignment checks that are needed before stores are actually executed. 601 */ 602 static int 603 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 604 dtrace_vstate_t *vstate) 605 { 606 /* 607 * First, check to see if the address is in scratch space... 608 */ 609 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 610 mstate->dtms_scratch_size)) 611 return (1); 612 613 /* 614 * Now check to see if it's a dynamic variable. This check will pick 615 * up both thread-local variables and any global dynamically-allocated 616 * variables. 617 */ 618 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 619 vstate->dtvs_dynvars.dtds_size)) { 620 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 621 uintptr_t base = (uintptr_t)dstate->dtds_base + 622 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 623 uintptr_t chunkoffs; 624 625 /* 626 * Before we assume that we can store here, we need to make 627 * sure that it isn't in our metadata -- storing to our 628 * dynamic variable metadata would corrupt our state. For 629 * the range to not include any dynamic variable metadata, 630 * it must: 631 * 632 * (1) Start above the hash table that is at the base of 633 * the dynamic variable space 634 * 635 * (2) Have a starting chunk offset that is beyond the 636 * dtrace_dynvar_t that is at the base of every chunk 637 * 638 * (3) Not span a chunk boundary 639 * 640 */ 641 if (addr < base) 642 return (0); 643 644 chunkoffs = (addr - base) % dstate->dtds_chunksize; 645 646 if (chunkoffs < sizeof (dtrace_dynvar_t)) 647 return (0); 648 649 if (chunkoffs + sz > dstate->dtds_chunksize) 650 return (0); 651 652 return (1); 653 } 654 655 /* 656 * Finally, check the static local and global variables. These checks 657 * take the longest, so we perform them last. 658 */ 659 if (dtrace_canstore_statvar(addr, sz, 660 vstate->dtvs_locals, vstate->dtvs_nlocals)) 661 return (1); 662 663 if (dtrace_canstore_statvar(addr, sz, 664 vstate->dtvs_globals, vstate->dtvs_nglobals)) 665 return (1); 666 667 return (0); 668 } 669 670 671 /* 672 * Convenience routine to check to see if the address is within a memory 673 * region in which a load may be issued given the user's privilege level; 674 * if not, it sets the appropriate error flags and loads 'addr' into the 675 * illegal value slot. 676 * 677 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 678 * appropriate memory access protection. 
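 * (Illustrative usage, mirroring dtrace_dynvar() below: a caller checks
 * "if (!dtrace_canload(base, size, mstate, vstate))" and bails out before
 * issuing any dtrace_load*() of that range.)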
679 */ 680 static int 681 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 682 dtrace_vstate_t *vstate) 683 { 684 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 685 686 /* 687 * If we hold the privilege to read from kernel memory, then 688 * everything is readable. 689 */ 690 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 691 return (1); 692 693 /* 694 * You can obviously read that which you can store. 695 */ 696 if (dtrace_canstore(addr, sz, mstate, vstate)) 697 return (1); 698 699 /* 700 * We're allowed to read from our own string table. 701 */ 702 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 703 mstate->dtms_difo->dtdo_strlen)) 704 return (1); 705 706 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 707 *illval = addr; 708 return (0); 709 } 710 711 /* 712 * Convenience routine to check to see if a given string is within a memory 713 * region in which a load may be issued given the user's privilege level; 714 * this exists so that we don't need to issue unnecessary dtrace_strlen() 715 * calls in the event that the user has all privileges. 716 */ 717 static int 718 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 719 dtrace_vstate_t *vstate) 720 { 721 size_t strsz; 722 723 /* 724 * If we hold the privilege to read from kernel memory, then 725 * everything is readable. 726 */ 727 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 728 return (1); 729 730 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 731 if (dtrace_canload(addr, strsz, mstate, vstate)) 732 return (1); 733 734 return (0); 735 } 736 737 /* 738 * Convenience routine to check to see if a given variable is within a memory 739 * region in which a load may be issued given the user's privilege level. 740 */ 741 static int 742 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 743 dtrace_vstate_t *vstate) 744 { 745 size_t sz; 746 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 747 748 /* 749 * If we hold the privilege to read from kernel memory, then 750 * everything is readable. 751 */ 752 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 753 return (1); 754 755 if (type->dtdt_kind == DIF_TYPE_STRING) 756 sz = dtrace_strlen(src, 757 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 758 else 759 sz = type->dtdt_size; 760 761 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 762 } 763 764 /* 765 * Compare two strings using safe loads. 766 */ 767 static int 768 dtrace_strncmp(char *s1, char *s2, size_t limit) 769 { 770 uint8_t c1, c2; 771 volatile uint16_t *flags; 772 773 if (s1 == s2 || limit == 0) 774 return (0); 775 776 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 777 778 do { 779 if (s1 == NULL) { 780 c1 = '\0'; 781 } else { 782 c1 = dtrace_load8((uintptr_t)s1++); 783 } 784 785 if (s2 == NULL) { 786 c2 = '\0'; 787 } else { 788 c2 = dtrace_load8((uintptr_t)s2++); 789 } 790 791 if (c1 != c2) 792 return (c1 - c2); 793 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 794 795 return (0); 796 } 797 798 /* 799 * Compute strlen(s) for a string using safe memory accesses. The additional 800 * len parameter is used to specify a maximum length to ensure completion. 801 */ 802 static size_t 803 dtrace_strlen(const char *s, size_t lim) 804 { 805 uint_t len; 806 807 for (len = 0; len != lim; len++) { 808 if (dtrace_load8((uintptr_t)s++) == '\0') 809 break; 810 } 811 812 return (len); 813 } 814 815 /* 816 * Check if an address falls within a toxic region. 
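 * The two unsigned comparisons below -- (kaddr - taddr < tsize) and
 * (taddr - kaddr < size) -- together catch every way in which the region
 * [kaddr, kaddr + size) can overlap a toxic range.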
817 */ 818 static int 819 dtrace_istoxic(uintptr_t kaddr, size_t size) 820 { 821 uintptr_t taddr, tsize; 822 int i; 823 824 for (i = 0; i < dtrace_toxranges; i++) { 825 taddr = dtrace_toxrange[i].dtt_base; 826 tsize = dtrace_toxrange[i].dtt_limit - taddr; 827 828 if (kaddr - taddr < tsize) { 829 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 830 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr; 831 return (1); 832 } 833 834 if (taddr - kaddr < size) { 835 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 836 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr; 837 return (1); 838 } 839 } 840 841 return (0); 842 } 843 844 /* 845 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 846 * memory specified by the DIF program. The dst is assumed to be safe memory 847 * that we can store to directly because it is managed by DTrace. As with 848 * standard bcopy, overlapping copies are handled properly. 849 */ 850 static void 851 dtrace_bcopy(const void *src, void *dst, size_t len) 852 { 853 if (len != 0) { 854 uint8_t *s1 = dst; 855 const uint8_t *s2 = src; 856 857 if (s1 <= s2) { 858 do { 859 *s1++ = dtrace_load8((uintptr_t)s2++); 860 } while (--len != 0); 861 } else { 862 s2 += len; 863 s1 += len; 864 865 do { 866 *--s1 = dtrace_load8((uintptr_t)--s2); 867 } while (--len != 0); 868 } 869 } 870 } 871 872 /* 873 * Copy src to dst using safe memory accesses, up to either the specified 874 * length, or the point that a nul byte is encountered. The src is assumed to 875 * be unsafe memory specified by the DIF program. The dst is assumed to be 876 * safe memory that we can store to directly because it is managed by DTrace. 877 * Unlike dtrace_bcopy(), overlapping regions are not handled. 878 */ 879 static void 880 dtrace_strcpy(const void *src, void *dst, size_t len) 881 { 882 if (len != 0) { 883 uint8_t *s1 = dst, c; 884 const uint8_t *s2 = src; 885 886 do { 887 *s1++ = c = dtrace_load8((uintptr_t)s2++); 888 } while (--len != 0 && c != '\0'); 889 } 890 } 891 892 /* 893 * Copy src to dst, deriving the size and type from the specified (BYREF) 894 * variable type. The src is assumed to be unsafe memory specified by the DIF 895 * program. The dst is assumed to be DTrace variable memory that is of the 896 * specified type; we assume that we can store to directly. 897 */ 898 static void 899 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 900 { 901 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 902 903 if (type->dtdt_kind == DIF_TYPE_STRING) { 904 dtrace_strcpy(src, dst, type->dtdt_size); 905 } else { 906 dtrace_bcopy(src, dst, type->dtdt_size); 907 } 908 } 909 910 /* 911 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 912 * unsafe memory specified by the DIF program. The s2 data is assumed to be 913 * safe memory that we can access directly because it is managed by DTrace. 914 */ 915 static int 916 dtrace_bcmp(const void *s1, const void *s2, size_t len) 917 { 918 volatile uint16_t *flags; 919 920 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 921 922 if (s1 == s2) 923 return (0); 924 925 if (s1 == NULL || s2 == NULL) 926 return (1); 927 928 if (s1 != s2 && len != 0) { 929 const uint8_t *ps1 = s1; 930 const uint8_t *ps2 = s2; 931 932 do { 933 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 934 return (1); 935 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 936 } 937 return (0); 938 } 939 940 /* 941 * Zero the specified region using a simple byte-by-byte loop. Note that this 942 * is for safe DTrace-managed memory only. 
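 * (Unlike dtrace_bcopy() above, no dtrace_load8() is needed here: only the
 * destination -- safe, DTrace-managed memory -- is touched.)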
943 */ 944 static void 945 dtrace_bzero(void *dst, size_t len) 946 { 947 uchar_t *cp; 948 949 for (cp = dst; len != 0; len--) 950 *cp++ = 0; 951 } 952 953 static void 954 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 955 { 956 uint64_t result[2]; 957 958 result[0] = addend1[0] + addend2[0]; 959 result[1] = addend1[1] + addend2[1] + 960 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 961 962 sum[0] = result[0]; 963 sum[1] = result[1]; 964 } 965 966 /* 967 * Shift the 128-bit value in a by b. If b is positive, shift left. 968 * If b is negative, shift right. 969 */ 970 static void 971 dtrace_shift_128(uint64_t *a, int b) 972 { 973 uint64_t mask; 974 975 if (b == 0) 976 return; 977 978 if (b < 0) { 979 b = -b; 980 if (b >= 64) { 981 a[0] = a[1] >> (b - 64); 982 a[1] = 0; 983 } else { 984 a[0] >>= b; 985 mask = 1LL << (64 - b); 986 mask -= 1; 987 a[0] |= ((a[1] & mask) << (64 - b)); 988 a[1] >>= b; 989 } 990 } else { 991 if (b >= 64) { 992 a[1] = a[0] << (b - 64); 993 a[0] = 0; 994 } else { 995 a[1] <<= b; 996 mask = a[0] >> (64 - b); 997 a[1] |= mask; 998 a[0] <<= b; 999 } 1000 } 1001 } 1002 1003 /* 1004 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1005 * use native multiplication on those, and then re-combine into the 1006 * resulting 128-bit value. 1007 * 1008 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1009 * hi1 * hi2 << 64 + 1010 * hi1 * lo2 << 32 + 1011 * hi2 * lo1 << 32 + 1012 * lo1 * lo2 1013 */ 1014 static void 1015 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1016 { 1017 uint64_t hi1, hi2, lo1, lo2; 1018 uint64_t tmp[2]; 1019 1020 hi1 = factor1 >> 32; 1021 hi2 = factor2 >> 32; 1022 1023 lo1 = factor1 & DT_MASK_LO; 1024 lo2 = factor2 & DT_MASK_LO; 1025 1026 product[0] = lo1 * lo2; 1027 product[1] = hi1 * hi2; 1028 1029 tmp[0] = hi1 * lo2; 1030 tmp[1] = 0; 1031 dtrace_shift_128(tmp, 32); 1032 dtrace_add_128(product, tmp, product); 1033 1034 tmp[0] = hi2 * lo1; 1035 tmp[1] = 0; 1036 dtrace_shift_128(tmp, 32); 1037 dtrace_add_128(product, tmp, product); 1038 } 1039 1040 /* 1041 * This privilege check should be used by actions and subroutines to 1042 * verify that the user credentials of the process that enabled the 1043 * invoking ECB match the target credentials 1044 */ 1045 static int 1046 dtrace_priv_proc_common_user(dtrace_state_t *state) 1047 { 1048 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1049 1050 /* 1051 * We should always have a non-NULL state cred here, since if cred 1052 * is null (anonymous tracing), we fast-path bypass this routine. 1053 */ 1054 ASSERT(s_cr != NULL); 1055 1056 if ((cr = CRED()) != NULL && 1057 s_cr->cr_uid == cr->cr_uid && 1058 s_cr->cr_uid == cr->cr_ruid && 1059 s_cr->cr_uid == cr->cr_suid && 1060 s_cr->cr_gid == cr->cr_gid && 1061 s_cr->cr_gid == cr->cr_rgid && 1062 s_cr->cr_gid == cr->cr_sgid) 1063 return (1); 1064 1065 return (0); 1066 } 1067 1068 /* 1069 * This privilege check should be used by actions and subroutines to 1070 * verify that the zone of the process that enabled the invoking ECB 1071 * matches the target credentials 1072 */ 1073 static int 1074 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1075 { 1076 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1077 1078 /* 1079 * We should always have a non-NULL state cred here, since if cred 1080 * is null (anonymous tracing), we fast-path bypass this routine. 
1081 */ 1082 ASSERT(s_cr != NULL); 1083 1084 if ((cr = CRED()) != NULL && 1085 s_cr->cr_zone == cr->cr_zone) 1086 return (1); 1087 1088 return (0); 1089 } 1090 1091 /* 1092 * This privilege check should be used by actions and subroutines to 1093 * verify that the process has not setuid or changed credentials. 1094 */ 1095 static int 1096 dtrace_priv_proc_common_nocd() 1097 { 1098 proc_t *proc; 1099 1100 if ((proc = ttoproc(curthread)) != NULL && 1101 !(proc->p_flag & SNOCD)) 1102 return (1); 1103 1104 return (0); 1105 } 1106 1107 static int 1108 dtrace_priv_proc_destructive(dtrace_state_t *state) 1109 { 1110 int action = state->dts_cred.dcr_action; 1111 1112 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1113 dtrace_priv_proc_common_zone(state) == 0) 1114 goto bad; 1115 1116 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1117 dtrace_priv_proc_common_user(state) == 0) 1118 goto bad; 1119 1120 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1121 dtrace_priv_proc_common_nocd() == 0) 1122 goto bad; 1123 1124 return (1); 1125 1126 bad: 1127 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1128 1129 return (0); 1130 } 1131 1132 static int 1133 dtrace_priv_proc_control(dtrace_state_t *state) 1134 { 1135 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1136 return (1); 1137 1138 if (dtrace_priv_proc_common_zone(state) && 1139 dtrace_priv_proc_common_user(state) && 1140 dtrace_priv_proc_common_nocd()) 1141 return (1); 1142 1143 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1144 1145 return (0); 1146 } 1147 1148 static int 1149 dtrace_priv_proc(dtrace_state_t *state) 1150 { 1151 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1152 return (1); 1153 1154 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1155 1156 return (0); 1157 } 1158 1159 static int 1160 dtrace_priv_kernel(dtrace_state_t *state) 1161 { 1162 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1163 return (1); 1164 1165 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1166 1167 return (0); 1168 } 1169 1170 static int 1171 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1172 { 1173 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1174 return (1); 1175 1176 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1177 1178 return (0); 1179 } 1180 1181 /* 1182 * Note: not called from probe context. This function is called 1183 * asynchronously (and at a regular interval) from outside of probe context to 1184 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1185 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1186 */ 1187 void 1188 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1189 { 1190 dtrace_dynvar_t *dirty; 1191 dtrace_dstate_percpu_t *dcpu; 1192 dtrace_dynvar_t **rinsep; 1193 int i, j, work = 0; 1194 1195 for (i = 0; i < NCPU; i++) { 1196 dcpu = &dstate->dtds_percpu[i]; 1197 rinsep = &dcpu->dtdsc_rinsing; 1198 1199 /* 1200 * If the dirty list is NULL, there is no dirty work to do. 1201 */ 1202 if (dcpu->dtdsc_dirty == NULL) 1203 continue; 1204 1205 if (dcpu->dtdsc_rinsing != NULL) { 1206 /* 1207 * If the rinsing list is non-NULL, then it is because 1208 * this CPU was selected to accept another CPU's 1209 * dirty list -- and since that time, dirty buffers 1210 * have accumulated. This is a highly unlikely 1211 * condition, but we choose to ignore the dirty 1212 * buffers -- they'll be picked up a future cleanse. 
1213 */ 1214 continue; 1215 } 1216 1217 if (dcpu->dtdsc_clean != NULL) { 1218 /* 1219 * If the clean list is non-NULL, then we're in a 1220 * situation where a CPU has done deallocations (we 1221 * have a non-NULL dirty list) but no allocations (we 1222 * also have a non-NULL clean list). We can't simply 1223 * move the dirty list into the clean list on this 1224 * CPU, yet we also don't want to allow this condition 1225 * to persist, lest a short clean list prevent a 1226 * massive dirty list from being cleaned (which in 1227 * turn could lead to otherwise avoidable dynamic 1228 * drops). To deal with this, we look for some CPU 1229 * with a NULL clean list, NULL dirty list, and NULL 1230 * rinsing list -- and then we borrow this CPU to 1231 * rinse our dirty list. 1232 */ 1233 for (j = 0; j < NCPU; j++) { 1234 dtrace_dstate_percpu_t *rinser; 1235 1236 rinser = &dstate->dtds_percpu[j]; 1237 1238 if (rinser->dtdsc_rinsing != NULL) 1239 continue; 1240 1241 if (rinser->dtdsc_dirty != NULL) 1242 continue; 1243 1244 if (rinser->dtdsc_clean != NULL) 1245 continue; 1246 1247 rinsep = &rinser->dtdsc_rinsing; 1248 break; 1249 } 1250 1251 if (j == NCPU) { 1252 /* 1253 * We were unable to find another CPU that 1254 * could accept this dirty list -- we are 1255 * therefore unable to clean it now. 1256 */ 1257 dtrace_dynvar_failclean++; 1258 continue; 1259 } 1260 } 1261 1262 work = 1; 1263 1264 /* 1265 * Atomically move the dirty list aside. 1266 */ 1267 do { 1268 dirty = dcpu->dtdsc_dirty; 1269 1270 /* 1271 * Before we zap the dirty list, set the rinsing list. 1272 * (This allows for a potential assertion in 1273 * dtrace_dynvar(): if a free dynamic variable appears 1274 * on a hash chain, either the dirty list or the 1275 * rinsing list for some CPU must be non-NULL.) 1276 */ 1277 *rinsep = dirty; 1278 dtrace_membar_producer(); 1279 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1280 dirty, NULL) != dirty); 1281 } 1282 1283 if (!work) { 1284 /* 1285 * We have no work to do; we can simply return. 1286 */ 1287 return; 1288 } 1289 1290 dtrace_sync(); 1291 1292 for (i = 0; i < NCPU; i++) { 1293 dcpu = &dstate->dtds_percpu[i]; 1294 1295 if (dcpu->dtdsc_rinsing == NULL) 1296 continue; 1297 1298 /* 1299 * We are now guaranteed that no hash chain contains a pointer 1300 * into this dirty list; we can make it clean. 1301 */ 1302 ASSERT(dcpu->dtdsc_clean == NULL); 1303 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1304 dcpu->dtdsc_rinsing = NULL; 1305 } 1306 1307 /* 1308 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1309 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1310 * This prevents a race whereby a CPU incorrectly decides that 1311 * the state should be something other than DTRACE_DSTATE_CLEAN 1312 * after dtrace_dynvar_clean() has completed. 1313 */ 1314 dtrace_sync(); 1315 1316 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1317 } 1318 1319 /* 1320 * Depending on the value of the op parameter, this function looks-up, 1321 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1322 * allocation is requested, this function will return a pointer to a 1323 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1324 * variable can be allocated. If NULL is returned, the appropriate counter 1325 * will be incremented. 
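 * (For example, a DTRACE_DYNVAR_NOALLOC lookup of an existing variable
 * returns its dtrace_dynvar_t, while a DTRACE_DYNVAR_ALLOC request that
 * cannot be satisfied bumps the appropriate drop counter -- dtdsc_drops,
 * dtdsc_dirty_drops or dtdsc_rinsing_drops -- and returns NULL.)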
1326 */ 1327 dtrace_dynvar_t * 1328 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1329 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1330 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1331 { 1332 uint64_t hashval = DTRACE_DYNHASH_VALID; 1333 dtrace_dynhash_t *hash = dstate->dtds_hash; 1334 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1335 processorid_t me = CPU->cpu_id, cpu = me; 1336 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1337 size_t bucket, ksize; 1338 size_t chunksize = dstate->dtds_chunksize; 1339 uintptr_t kdata, lock, nstate; 1340 uint_t i; 1341 1342 ASSERT(nkeys != 0); 1343 1344 /* 1345 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1346 * algorithm. For the by-value portions, we perform the algorithm in 1347 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1348 * bit, and seems to have only a minute effect on distribution. For 1349 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1350 * over each referenced byte. It's painful to do this, but it's much 1351 * better than pathological hash distribution. The efficacy of the 1352 * hashing algorithm (and a comparison with other algorithms) may be 1353 * found by running the ::dtrace_dynstat MDB dcmd. 1354 */ 1355 for (i = 0; i < nkeys; i++) { 1356 if (key[i].dttk_size == 0) { 1357 uint64_t val = key[i].dttk_value; 1358 1359 hashval += (val >> 48) & 0xffff; 1360 hashval += (hashval << 10); 1361 hashval ^= (hashval >> 6); 1362 1363 hashval += (val >> 32) & 0xffff; 1364 hashval += (hashval << 10); 1365 hashval ^= (hashval >> 6); 1366 1367 hashval += (val >> 16) & 0xffff; 1368 hashval += (hashval << 10); 1369 hashval ^= (hashval >> 6); 1370 1371 hashval += val & 0xffff; 1372 hashval += (hashval << 10); 1373 hashval ^= (hashval >> 6); 1374 } else { 1375 /* 1376 * This is incredibly painful, but it beats the hell 1377 * out of the alternative. 1378 */ 1379 uint64_t j, size = key[i].dttk_size; 1380 uintptr_t base = (uintptr_t)key[i].dttk_value; 1381 1382 if (!dtrace_canload(base, size, mstate, vstate)) 1383 break; 1384 1385 for (j = 0; j < size; j++) { 1386 hashval += dtrace_load8(base + j); 1387 hashval += (hashval << 10); 1388 hashval ^= (hashval >> 6); 1389 } 1390 } 1391 } 1392 1393 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1394 return (NULL); 1395 1396 hashval += (hashval << 3); 1397 hashval ^= (hashval >> 11); 1398 hashval += (hashval << 15); 1399 1400 /* 1401 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1402 * comes out to be one of our two sentinel hash values. If this 1403 * actually happens, we set the hashval to be a value known to be a 1404 * non-sentinel value. 1405 */ 1406 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1407 hashval = DTRACE_DYNHASH_VALID; 1408 1409 /* 1410 * Yes, it's painful to do a divide here. If the cycle count becomes 1411 * important here, tricks can be pulled to reduce it. (However, it's 1412 * critical that hash collisions be kept to an absolute minimum; 1413 * they're much more painful than a divide.) It's better to have a 1414 * solution that generates few collisions and still keeps things 1415 * relatively simple. 
1416 */ 1417 bucket = hashval % dstate->dtds_hashsize; 1418 1419 if (op == DTRACE_DYNVAR_DEALLOC) { 1420 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1421 1422 for (;;) { 1423 while ((lock = *lockp) & 1) 1424 continue; 1425 1426 if (dtrace_casptr((void *)lockp, 1427 (void *)lock, (void *)(lock + 1)) == (void *)lock) 1428 break; 1429 } 1430 1431 dtrace_membar_producer(); 1432 } 1433 1434 top: 1435 prev = NULL; 1436 lock = hash[bucket].dtdh_lock; 1437 1438 dtrace_membar_consumer(); 1439 1440 start = hash[bucket].dtdh_chain; 1441 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1442 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1443 op != DTRACE_DYNVAR_DEALLOC)); 1444 1445 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1446 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1447 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1448 1449 if (dvar->dtdv_hashval != hashval) { 1450 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1451 /* 1452 * We've reached the sink, and therefore the 1453 * end of the hash chain; we can kick out of 1454 * the loop knowing that we have seen a valid 1455 * snapshot of state. 1456 */ 1457 ASSERT(dvar->dtdv_next == NULL); 1458 ASSERT(dvar == &dtrace_dynhash_sink); 1459 break; 1460 } 1461 1462 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1463 /* 1464 * We've gone off the rails: somewhere along 1465 * the line, one of the members of this hash 1466 * chain was deleted. Note that we could also 1467 * detect this by simply letting this loop run 1468 * to completion, as we would eventually hit 1469 * the end of the dirty list. However, we 1470 * want to avoid running the length of the 1471 * dirty list unnecessarily (it might be quite 1472 * long), so we catch this as early as 1473 * possible by detecting the hash marker. In 1474 * this case, we simply set dvar to NULL and 1475 * break; the conditional after the loop will 1476 * send us back to top. 1477 */ 1478 dvar = NULL; 1479 break; 1480 } 1481 1482 goto next; 1483 } 1484 1485 if (dtuple->dtt_nkeys != nkeys) 1486 goto next; 1487 1488 for (i = 0; i < nkeys; i++, dkey++) { 1489 if (dkey->dttk_size != key[i].dttk_size) 1490 goto next; /* size or type mismatch */ 1491 1492 if (dkey->dttk_size != 0) { 1493 if (dtrace_bcmp( 1494 (void *)(uintptr_t)key[i].dttk_value, 1495 (void *)(uintptr_t)dkey->dttk_value, 1496 dkey->dttk_size)) 1497 goto next; 1498 } else { 1499 if (dkey->dttk_value != key[i].dttk_value) 1500 goto next; 1501 } 1502 } 1503 1504 if (op != DTRACE_DYNVAR_DEALLOC) 1505 return (dvar); 1506 1507 ASSERT(dvar->dtdv_next == NULL || 1508 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1509 1510 if (prev != NULL) { 1511 ASSERT(hash[bucket].dtdh_chain != dvar); 1512 ASSERT(start != dvar); 1513 ASSERT(prev->dtdv_next == dvar); 1514 prev->dtdv_next = dvar->dtdv_next; 1515 } else { 1516 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1517 start, dvar->dtdv_next) != start) { 1518 /* 1519 * We have failed to atomically swing the 1520 * hash table head pointer, presumably because 1521 * of a conflicting allocation on another CPU. 1522 * We need to reread the hash chain and try 1523 * again. 1524 */ 1525 goto top; 1526 } 1527 } 1528 1529 dtrace_membar_producer(); 1530 1531 /* 1532 * Now set the hash value to indicate that it's free. 
1533 */ 1534 ASSERT(hash[bucket].dtdh_chain != dvar); 1535 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1536 1537 dtrace_membar_producer(); 1538 1539 /* 1540 * Set the next pointer to point at the dirty list, and 1541 * atomically swing the dirty pointer to the newly freed dvar. 1542 */ 1543 do { 1544 next = dcpu->dtdsc_dirty; 1545 dvar->dtdv_next = next; 1546 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1547 1548 /* 1549 * Finally, unlock this hash bucket. 1550 */ 1551 ASSERT(hash[bucket].dtdh_lock == lock); 1552 ASSERT(lock & 1); 1553 hash[bucket].dtdh_lock++; 1554 1555 return (NULL); 1556 next: 1557 prev = dvar; 1558 continue; 1559 } 1560 1561 if (dvar == NULL) { 1562 /* 1563 * If dvar is NULL, it is because we went off the rails: 1564 * one of the elements that we traversed in the hash chain 1565 * was deleted while we were traversing it. In this case, 1566 * we assert that we aren't doing a dealloc (deallocs lock 1567 * the hash bucket to prevent themselves from racing with 1568 * one another), and retry the hash chain traversal. 1569 */ 1570 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1571 goto top; 1572 } 1573 1574 if (op != DTRACE_DYNVAR_ALLOC) { 1575 /* 1576 * If we are not to allocate a new variable, we want to 1577 * return NULL now. Before we return, check that the value 1578 * of the lock word hasn't changed. If it has, we may have 1579 * seen an inconsistent snapshot. 1580 */ 1581 if (op == DTRACE_DYNVAR_NOALLOC) { 1582 if (hash[bucket].dtdh_lock != lock) 1583 goto top; 1584 } else { 1585 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1586 ASSERT(hash[bucket].dtdh_lock == lock); 1587 ASSERT(lock & 1); 1588 hash[bucket].dtdh_lock++; 1589 } 1590 1591 return (NULL); 1592 } 1593 1594 /* 1595 * We need to allocate a new dynamic variable. The size we need is the 1596 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1597 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1598 * the size of any referred-to data (dsize). We then round the final 1599 * size up to the chunksize for allocation. 1600 */ 1601 for (ksize = 0, i = 0; i < nkeys; i++) 1602 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1603 1604 /* 1605 * This should be pretty much impossible, but could happen if, say, 1606 * strange DIF specified the tuple. Ideally, this should be an 1607 * assertion and not an error condition -- but that requires that the 1608 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1609 * bullet-proof. (That is, it must not be able to be fooled by 1610 * malicious DIF.) Given the lack of backwards branches in DIF, 1611 * solving this would presumably not amount to solving the Halting 1612 * Problem -- but it still seems awfully hard. 1613 */ 1614 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1615 ksize + dsize > chunksize) { 1616 dcpu->dtdsc_drops++; 1617 return (NULL); 1618 } 1619 1620 nstate = DTRACE_DSTATE_EMPTY; 1621 1622 do { 1623 retry: 1624 free = dcpu->dtdsc_free; 1625 1626 if (free == NULL) { 1627 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1628 void *rval; 1629 1630 if (clean == NULL) { 1631 /* 1632 * We're out of dynamic variable space on 1633 * this CPU. Unless we have tried all CPUs, 1634 * we'll try to allocate from a different 1635 * CPU. 
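 * The switch below walks the CPUs round-robin (via ++cpu), remembering the
 * worst state it has seen -- DIRTY or RINSING -- so that the correct drop
 * counter is incremented once every CPU's free and clean lists have been
 * exhausted.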
1636 */ 1637 switch (dstate->dtds_state) { 1638 case DTRACE_DSTATE_CLEAN: { 1639 void *sp = &dstate->dtds_state; 1640 1641 if (++cpu >= NCPU) 1642 cpu = 0; 1643 1644 if (dcpu->dtdsc_dirty != NULL && 1645 nstate == DTRACE_DSTATE_EMPTY) 1646 nstate = DTRACE_DSTATE_DIRTY; 1647 1648 if (dcpu->dtdsc_rinsing != NULL) 1649 nstate = DTRACE_DSTATE_RINSING; 1650 1651 dcpu = &dstate->dtds_percpu[cpu]; 1652 1653 if (cpu != me) 1654 goto retry; 1655 1656 (void) dtrace_cas32(sp, 1657 DTRACE_DSTATE_CLEAN, nstate); 1658 1659 /* 1660 * To increment the correct bean 1661 * counter, take another lap. 1662 */ 1663 goto retry; 1664 } 1665 1666 case DTRACE_DSTATE_DIRTY: 1667 dcpu->dtdsc_dirty_drops++; 1668 break; 1669 1670 case DTRACE_DSTATE_RINSING: 1671 dcpu->dtdsc_rinsing_drops++; 1672 break; 1673 1674 case DTRACE_DSTATE_EMPTY: 1675 dcpu->dtdsc_drops++; 1676 break; 1677 } 1678 1679 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1680 return (NULL); 1681 } 1682 1683 /* 1684 * The clean list appears to be non-empty. We want to 1685 * move the clean list to the free list; we start by 1686 * moving the clean pointer aside. 1687 */ 1688 if (dtrace_casptr(&dcpu->dtdsc_clean, 1689 clean, NULL) != clean) { 1690 /* 1691 * We are in one of two situations: 1692 * 1693 * (a) The clean list was switched to the 1694 * free list by another CPU. 1695 * 1696 * (b) The clean list was added to by the 1697 * cleansing cyclic. 1698 * 1699 * In either of these situations, we can 1700 * just reattempt the free list allocation. 1701 */ 1702 goto retry; 1703 } 1704 1705 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1706 1707 /* 1708 * Now we'll move the clean list to our free list. 1709 * It's impossible for this to fail: the only way 1710 * the free list can be updated is through this 1711 * code path, and only one CPU can own the clean list. 1712 * Thus, it would only be possible for this to fail if 1713 * this code were racing with dtrace_dynvar_clean(). 1714 * (That is, if dtrace_dynvar_clean() updated the clean 1715 * list, and we ended up racing to update the free 1716 * list.) This race is prevented by the dtrace_sync() 1717 * in dtrace_dynvar_clean() -- which flushes the 1718 * owners of the clean lists out before resetting 1719 * the clean lists. 1720 */ 1721 dcpu = &dstate->dtds_percpu[me]; 1722 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1723 ASSERT(rval == NULL); 1724 goto retry; 1725 } 1726 1727 dvar = free; 1728 new_free = dvar->dtdv_next; 1729 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1730 1731 /* 1732 * We have now allocated a new chunk. We copy the tuple keys into the 1733 * tuple array and copy any referenced key data into the data space 1734 * following the tuple array. As we do this, we relocate dttk_value 1735 * in the final tuple to point to the key data address in the chunk. 
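 * (A hedged layout sketch: for a two-key tuple whose second key is a 5-byte
 * by-reference value, the chunk holds the dtrace_dynvar_t header and both
 * dtrace_key_t entries, then ksize -- here P2ROUNDUP(5, 8) = 8 -- bytes of
 * copied key data starting at kdata, with dtdv_data beginning immediately
 * after at kdata + ksize.)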
1736 */ 1737 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1738 dvar->dtdv_data = (void *)(kdata + ksize); 1739 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1740 1741 for (i = 0; i < nkeys; i++) { 1742 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1743 size_t kesize = key[i].dttk_size; 1744 1745 if (kesize != 0) { 1746 dtrace_bcopy( 1747 (const void *)(uintptr_t)key[i].dttk_value, 1748 (void *)kdata, kesize); 1749 dkey->dttk_value = kdata; 1750 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1751 } else { 1752 dkey->dttk_value = key[i].dttk_value; 1753 } 1754 1755 dkey->dttk_size = kesize; 1756 } 1757 1758 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1759 dvar->dtdv_hashval = hashval; 1760 dvar->dtdv_next = start; 1761 1762 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1763 return (dvar); 1764 1765 /* 1766 * The cas has failed. Either another CPU is adding an element to 1767 * this hash chain, or another CPU is deleting an element from this 1768 * hash chain. The simplest way to deal with both of these cases 1769 * (though not necessarily the most efficient) is to free our 1770 * allocated block and tail-call ourselves. Note that the free is 1771 * to the dirty list and _not_ to the free list. This is to prevent 1772 * races with allocators, above. 1773 */ 1774 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1775 1776 dtrace_membar_producer(); 1777 1778 do { 1779 free = dcpu->dtdsc_dirty; 1780 dvar->dtdv_next = free; 1781 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1782 1783 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1784 } 1785 1786 /*ARGSUSED*/ 1787 static void 1788 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1789 { 1790 if ((int64_t)nval < (int64_t)*oval) 1791 *oval = nval; 1792 } 1793 1794 /*ARGSUSED*/ 1795 static void 1796 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1797 { 1798 if ((int64_t)nval > (int64_t)*oval) 1799 *oval = nval; 1800 } 1801 1802 static void 1803 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1804 { 1805 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1806 int64_t val = (int64_t)nval; 1807 1808 if (val < 0) { 1809 for (i = 0; i < zero; i++) { 1810 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1811 quanta[i] += incr; 1812 return; 1813 } 1814 } 1815 } else { 1816 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1817 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1818 quanta[i - 1] += incr; 1819 return; 1820 } 1821 } 1822 1823 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1824 return; 1825 } 1826 1827 ASSERT(0); 1828 } 1829 1830 static void 1831 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1832 { 1833 uint64_t arg = *lquanta++; 1834 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1835 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1836 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1837 int32_t val = (int32_t)nval, level; 1838 1839 ASSERT(step != 0); 1840 ASSERT(levels != 0); 1841 1842 if (val < base) { 1843 /* 1844 * This is an underflow. 1845 */ 1846 lquanta[0] += incr; 1847 return; 1848 } 1849 1850 level = (val - base) / step; 1851 1852 if (level < levels) { 1853 lquanta[level + 1] += incr; 1854 return; 1855 } 1856 1857 /* 1858 * This is an overflow. 
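 * (For example, with base 0, step 10 and 10 levels, any value of 100 or
 * greater falls through to this bucket.)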
1859 */ 1860 lquanta[levels + 1] += incr; 1861 } 1862 1863 static int 1864 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1865 uint16_t high, uint16_t nsteps, int64_t value) 1866 { 1867 int64_t this = 1, last, next; 1868 int base = 1, order; 1869 1870 ASSERT(factor <= nsteps); 1871 ASSERT(nsteps % factor == 0); 1872 1873 for (order = 0; order < low; order++) 1874 this *= factor; 1875 1876 /* 1877 * If our value is less than our factor taken to the power of the 1878 * low order of magnitude, it goes into the zeroth bucket. 1879 */ 1880 if (value < (last = this)) 1881 return (0); 1882 1883 for (this *= factor; order <= high; order++) { 1884 int nbuckets = this > nsteps ? nsteps : this; 1885 1886 if ((next = this * factor) < this) { 1887 /* 1888 * We should not generally get log/linear quantizations 1889 * with a high magnitude that allows 64-bits to 1890 * overflow, but we nonetheless protect against this 1891 * by explicitly checking for overflow, and clamping 1892 * our value accordingly. 1893 */ 1894 value = this - 1; 1895 } 1896 1897 if (value < this) { 1898 /* 1899 * If our value lies within this order of magnitude, 1900 * determine its position by taking the offset within 1901 * the order of magnitude, dividing by the bucket 1902 * width, and adding to our (accumulated) base. 1903 */ 1904 return (base + (value - last) / (this / nbuckets)); 1905 } 1906 1907 base += nbuckets - (nbuckets / factor); 1908 last = this; 1909 this = next; 1910 } 1911 1912 /* 1913 * Our value is greater than or equal to our factor taken to the 1914 * power of one plus the high magnitude -- return the top bucket. 1915 */ 1916 return (base); 1917 } 1918 1919 static void 1920 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1921 { 1922 uint64_t arg = *llquanta++; 1923 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1924 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1925 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1926 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1927 1928 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1929 low, high, nsteps, nval)] += incr; 1930 } 1931 1932 /*ARGSUSED*/ 1933 static void 1934 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1935 { 1936 data[0]++; 1937 data[1] += nval; 1938 } 1939 1940 /*ARGSUSED*/ 1941 static void 1942 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1943 { 1944 int64_t snval = (int64_t)nval; 1945 uint64_t tmp[2]; 1946 1947 data[0]++; 1948 data[1] += nval; 1949 1950 /* 1951 * What we want to say here is: 1952 * 1953 * data[2] += nval * nval; 1954 * 1955 * But given that nval is 64-bit, we could easily overflow, so 1956 * we do this as 128-bit arithmetic. 1957 */ 1958 if (snval < 0) 1959 snval = -snval; 1960 1961 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1962 dtrace_add_128(data + 2, tmp, data + 2); 1963 } 1964 1965 /*ARGSUSED*/ 1966 static void 1967 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1968 { 1969 *oval = *oval + 1; 1970 } 1971 1972 /*ARGSUSED*/ 1973 static void 1974 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1975 { 1976 *oval += nval; 1977 } 1978 1979 /* 1980 * Aggregate given the tuple in the principal data buffer, and the aggregating 1981 * action denoted by the specified dtrace_aggregation_t. The aggregation 1982 * buffer is specified as the buf parameter. 
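 *
 * Roughly (and purely for illustration -- the structures in
 * <sys/dtrace_impl.h> are authoritative), each key that this routine
 * maintains in the aggregation buffer is laid out as:
 *
 *	[ dtrace_aggid_t aggid | tuple key data ... | aggregated value(s) ]
 *
 * with dtak_data pointing at the aggid and the aggregated value(s)
 * living at dtak_data + dtak_size.  The hash bucket array sits just
 * below the dtrace_aggbuffer_t metastructure at the very end of the
 * buffer, the dtrace_aggkey_t structures are allocated downward from
 * there (dtagb_free), and the key/value records sketched above are
 * laid down upward from the front of the buffer (dtb_offset).
 *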
This routine does not return 1983 * failure; if there is no space in the aggregation buffer, the data will be 1984 * dropped, and a corresponding counter incremented. 1985 */ 1986 static void 1987 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 1988 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 1989 { 1990 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 1991 uint32_t i, ndx, size, fsize; 1992 uint32_t align = sizeof (uint64_t) - 1; 1993 dtrace_aggbuffer_t *agb; 1994 dtrace_aggkey_t *key; 1995 uint32_t hashval = 0, limit, isstr; 1996 caddr_t tomax, data, kdata; 1997 dtrace_actkind_t action; 1998 dtrace_action_t *act; 1999 uintptr_t offs; 2000 2001 if (buf == NULL) 2002 return; 2003 2004 if (!agg->dtag_hasarg) { 2005 /* 2006 * Currently, only quantize() and lquantize() take additional 2007 * arguments, and they have the same semantics: an increment 2008 * value that defaults to 1 when not present. If additional 2009 * aggregating actions take arguments, the setting of the 2010 * default argument value will presumably have to become more 2011 * sophisticated... 2012 */ 2013 arg = 1; 2014 } 2015 2016 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2017 size = rec->dtrd_offset - agg->dtag_base; 2018 fsize = size + rec->dtrd_size; 2019 2020 ASSERT(dbuf->dtb_tomax != NULL); 2021 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2022 2023 if ((tomax = buf->dtb_tomax) == NULL) { 2024 dtrace_buffer_drop(buf); 2025 return; 2026 } 2027 2028 /* 2029 * The metastructure is always at the bottom of the buffer. 2030 */ 2031 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2032 sizeof (dtrace_aggbuffer_t)); 2033 2034 if (buf->dtb_offset == 0) { 2035 /* 2036 * We just kludge up approximately 1/8th of the size to be 2037 * buckets. If this guess ends up being routinely 2038 * off-the-mark, we may need to dynamically readjust this 2039 * based on past performance. 2040 */ 2041 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2042 2043 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2044 (uintptr_t)tomax || hashsize == 0) { 2045 /* 2046 * We've been given a ludicrously small buffer; 2047 * increment our drop count and leave. 2048 */ 2049 dtrace_buffer_drop(buf); 2050 return; 2051 } 2052 2053 /* 2054 * And now, a pathetic attempt to try to get a an odd (or 2055 * perchance, a prime) hash size for better hash distribution. 2056 */ 2057 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2058 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2059 2060 agb->dtagb_hashsize = hashsize; 2061 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2062 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2063 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2064 2065 for (i = 0; i < agb->dtagb_hashsize; i++) 2066 agb->dtagb_hash[i] = NULL; 2067 } 2068 2069 ASSERT(agg->dtag_first != NULL); 2070 ASSERT(agg->dtag_first->dta_intuple); 2071 2072 /* 2073 * Calculate the hash value based on the key. Note that we _don't_ 2074 * include the aggid in the hashing (but we will store it as part of 2075 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2076 * algorithm: a simple, quick algorithm that has no known funnels, and 2077 * gets good distribution in practice. The efficacy of the hashing 2078 * algorithm (and a comparison with other algorithms) may be found by 2079 * running the ::dtrace_aggstat MDB dcmd. 
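 *
 * For reference, a minimal standalone rendition of the same hash over
 * an arbitrary byte buffer might look like the following (illustrative
 * only -- the name oat_hash is not part of DTrace, and the real loop
 * below additionally stops at the terminating NUL when a tuple member
 * is a string):
 *
 *	static uint32_t
 *	oat_hash(const uint8_t *data, size_t len)
 *	{
 *		uint32_t hashval = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			hashval += data[i];
 *			hashval += (hashval << 10);
 *			hashval ^= (hashval >> 6);
 *		}
 *
 *		hashval += (hashval << 3);
 *		hashval ^= (hashval >> 11);
 *		hashval += (hashval << 15);
 *
 *		return (hashval);
 *	}
 *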
2080 */ 2081 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2082 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2083 limit = i + act->dta_rec.dtrd_size; 2084 ASSERT(limit <= size); 2085 isstr = DTRACEACT_ISSTRING(act); 2086 2087 for (; i < limit; i++) { 2088 hashval += data[i]; 2089 hashval += (hashval << 10); 2090 hashval ^= (hashval >> 6); 2091 2092 if (isstr && data[i] == '\0') 2093 break; 2094 } 2095 } 2096 2097 hashval += (hashval << 3); 2098 hashval ^= (hashval >> 11); 2099 hashval += (hashval << 15); 2100 2101 /* 2102 * Yes, the divide here is expensive -- but it's generally the least 2103 * of the performance issues given the amount of data that we iterate 2104 * over to compute hash values, compare data, etc. 2105 */ 2106 ndx = hashval % agb->dtagb_hashsize; 2107 2108 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2109 ASSERT((caddr_t)key >= tomax); 2110 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2111 2112 if (hashval != key->dtak_hashval || key->dtak_size != size) 2113 continue; 2114 2115 kdata = key->dtak_data; 2116 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2117 2118 for (act = agg->dtag_first; act->dta_intuple; 2119 act = act->dta_next) { 2120 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2121 limit = i + act->dta_rec.dtrd_size; 2122 ASSERT(limit <= size); 2123 isstr = DTRACEACT_ISSTRING(act); 2124 2125 for (; i < limit; i++) { 2126 if (kdata[i] != data[i]) 2127 goto next; 2128 2129 if (isstr && data[i] == '\0') 2130 break; 2131 } 2132 } 2133 2134 if (action != key->dtak_action) { 2135 /* 2136 * We are aggregating on the same value in the same 2137 * aggregation with two different aggregating actions. 2138 * (This should have been picked up in the compiler, 2139 * so we may be dealing with errant or devious DIF.) 2140 * This is an error condition; we indicate as much, 2141 * and return. 2142 */ 2143 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2144 return; 2145 } 2146 2147 /* 2148 * This is a hit: we need to apply the aggregator to 2149 * the value at this key. 2150 */ 2151 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2152 return; 2153 next: 2154 continue; 2155 } 2156 2157 /* 2158 * We didn't find it. We need to allocate some zero-filled space, 2159 * link it into the hash table appropriately, and apply the aggregator 2160 * to the (zero-filled) value. 2161 */ 2162 offs = buf->dtb_offset; 2163 while (offs & (align - 1)) 2164 offs += sizeof (uint32_t); 2165 2166 /* 2167 * If we don't have enough room to both allocate a new key _and_ 2168 * its associated data, increment the drop count and return. 2169 */ 2170 if ((uintptr_t)tomax + offs + fsize > 2171 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2172 dtrace_buffer_drop(buf); 2173 return; 2174 } 2175 2176 /*CONSTCOND*/ 2177 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2178 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2179 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2180 2181 key->dtak_data = kdata = tomax + offs; 2182 buf->dtb_offset = offs + fsize; 2183 2184 /* 2185 * Now copy the data across. 2186 */ 2187 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2188 2189 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2190 kdata[i] = data[i]; 2191 2192 /* 2193 * Because strings are not zeroed out by default, we need to iterate 2194 * looking for actions that store strings, and we need to explicitly 2195 * pad these strings out with zeroes. 
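 *
 * For instance (purely illustrative), if a string member of the tuple
 * occupies a 16-byte record but holds only "foo", the copy above
 * leaves "foo" and its terminating NUL in place and the loop below
 * explicitly stores '\0' in the remaining twelve bytes -- presumably
 * so that the key data that lands in the buffer is deterministic
 * rather than trailing off into whatever bytes happened to follow the
 * NUL in the principal buffer.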
2196 */ 2197 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2198 int nul; 2199 2200 if (!DTRACEACT_ISSTRING(act)) 2201 continue; 2202 2203 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2204 limit = i + act->dta_rec.dtrd_size; 2205 ASSERT(limit <= size); 2206 2207 for (nul = 0; i < limit; i++) { 2208 if (nul) { 2209 kdata[i] = '\0'; 2210 continue; 2211 } 2212 2213 if (data[i] != '\0') 2214 continue; 2215 2216 nul = 1; 2217 } 2218 } 2219 2220 for (i = size; i < fsize; i++) 2221 kdata[i] = 0; 2222 2223 key->dtak_hashval = hashval; 2224 key->dtak_size = size; 2225 key->dtak_action = action; 2226 key->dtak_next = agb->dtagb_hash[ndx]; 2227 agb->dtagb_hash[ndx] = key; 2228 2229 /* 2230 * Finally, apply the aggregator. 2231 */ 2232 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2233 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2234 } 2235 2236 /* 2237 * Given consumer state, this routine finds a speculation in the INACTIVE 2238 * state and transitions it into the ACTIVE state. If there is no speculation 2239 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2240 * incremented -- it is up to the caller to take appropriate action. 2241 */ 2242 static int 2243 dtrace_speculation(dtrace_state_t *state) 2244 { 2245 int i = 0; 2246 dtrace_speculation_state_t current; 2247 uint32_t *stat = &state->dts_speculations_unavail, count; 2248 2249 while (i < state->dts_nspeculations) { 2250 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2251 2252 current = spec->dtsp_state; 2253 2254 if (current != DTRACESPEC_INACTIVE) { 2255 if (current == DTRACESPEC_COMMITTINGMANY || 2256 current == DTRACESPEC_COMMITTING || 2257 current == DTRACESPEC_DISCARDING) 2258 stat = &state->dts_speculations_busy; 2259 i++; 2260 continue; 2261 } 2262 2263 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2264 current, DTRACESPEC_ACTIVE) == current) 2265 return (i + 1); 2266 } 2267 2268 /* 2269 * We couldn't find a speculation. If we found as much as a single 2270 * busy speculation buffer, we'll attribute this failure as "busy" 2271 * instead of "unavail". 2272 */ 2273 do { 2274 count = *stat; 2275 } while (dtrace_cas32(stat, count, count + 1) != count); 2276 2277 return (0); 2278 } 2279 2280 /* 2281 * This routine commits an active speculation. If the specified speculation 2282 * is not in a valid state to perform a commit(), this routine will silently do 2283 * nothing. 
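 * (Speculation identifiers, as handed out by dtrace_speculation()
 * above, are one-based; an identifier of zero denotes "no speculation
 * was available" and -- here, as in the discard and buffer-lookup
 * routines below -- is treated as a silent no-op.)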
The state of the specified speculation is transitioned according 2284 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2285 */ 2286 static void 2287 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2288 dtrace_specid_t which) 2289 { 2290 dtrace_speculation_t *spec; 2291 dtrace_buffer_t *src, *dest; 2292 uintptr_t daddr, saddr, dlimit; 2293 dtrace_speculation_state_t current, new; 2294 intptr_t offs; 2295 2296 if (which == 0) 2297 return; 2298 2299 if (which > state->dts_nspeculations) { 2300 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2301 return; 2302 } 2303 2304 spec = &state->dts_speculations[which - 1]; 2305 src = &spec->dtsp_buffer[cpu]; 2306 dest = &state->dts_buffer[cpu]; 2307 2308 do { 2309 current = spec->dtsp_state; 2310 2311 if (current == DTRACESPEC_COMMITTINGMANY) 2312 break; 2313 2314 switch (current) { 2315 case DTRACESPEC_INACTIVE: 2316 case DTRACESPEC_DISCARDING: 2317 return; 2318 2319 case DTRACESPEC_COMMITTING: 2320 /* 2321 * This is only possible if we are (a) commit()'ing 2322 * without having done a prior speculate() on this CPU 2323 * and (b) racing with another commit() on a different 2324 * CPU. There's nothing to do -- we just assert that 2325 * our offset is 0. 2326 */ 2327 ASSERT(src->dtb_offset == 0); 2328 return; 2329 2330 case DTRACESPEC_ACTIVE: 2331 new = DTRACESPEC_COMMITTING; 2332 break; 2333 2334 case DTRACESPEC_ACTIVEONE: 2335 /* 2336 * This speculation is active on one CPU. If our 2337 * buffer offset is non-zero, we know that the one CPU 2338 * must be us. Otherwise, we are committing on a 2339 * different CPU from the speculate(), and we must 2340 * rely on being asynchronously cleaned. 2341 */ 2342 if (src->dtb_offset != 0) { 2343 new = DTRACESPEC_COMMITTING; 2344 break; 2345 } 2346 /*FALLTHROUGH*/ 2347 2348 case DTRACESPEC_ACTIVEMANY: 2349 new = DTRACESPEC_COMMITTINGMANY; 2350 break; 2351 2352 default: 2353 ASSERT(0); 2354 } 2355 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2356 current, new) != current); 2357 2358 /* 2359 * We have set the state to indicate that we are committing this 2360 * speculation. Now reserve the necessary space in the destination 2361 * buffer. 2362 */ 2363 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2364 sizeof (uint64_t), state, NULL)) < 0) { 2365 dtrace_buffer_drop(dest); 2366 goto out; 2367 } 2368 2369 /* 2370 * We have the space; copy the buffer across. (Note that this is a 2371 * highly subobtimal bcopy(); in the unlikely event that this becomes 2372 * a serious performance issue, a high-performance DTrace-specific 2373 * bcopy() should obviously be invented.) 2374 */ 2375 daddr = (uintptr_t)dest->dtb_tomax + offs; 2376 dlimit = daddr + src->dtb_offset; 2377 saddr = (uintptr_t)src->dtb_tomax; 2378 2379 /* 2380 * First, the aligned portion. 2381 */ 2382 while (dlimit - daddr >= sizeof (uint64_t)) { 2383 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2384 2385 daddr += sizeof (uint64_t); 2386 saddr += sizeof (uint64_t); 2387 } 2388 2389 /* 2390 * Now any left-over bit... 2391 */ 2392 while (dlimit - daddr) 2393 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2394 2395 /* 2396 * Finally, commit the reserved space in the destination buffer. 2397 */ 2398 dest->dtb_offset = offs + src->dtb_offset; 2399 2400 out: 2401 /* 2402 * If we're lucky enough to be the only active CPU on this speculation 2403 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 
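 * Otherwise the speculation has been committed as
 * DTRACESPEC_COMMITTINGMANY, and the transition back to INACTIVE is
 * deferred to the asynchronous cleaner (dtrace_speculation_clean(),
 * below), which cross calls every CPU to drain the remaining
 * speculative buffers before resetting the state.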
2404 */ 2405 if (current == DTRACESPEC_ACTIVE || 2406 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2407 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2408 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2409 2410 ASSERT(rval == DTRACESPEC_COMMITTING); 2411 } 2412 2413 src->dtb_offset = 0; 2414 src->dtb_xamot_drops += src->dtb_drops; 2415 src->dtb_drops = 0; 2416 } 2417 2418 /* 2419 * This routine discards an active speculation. If the specified speculation 2420 * is not in a valid state to perform a discard(), this routine will silently 2421 * do nothing. The state of the specified speculation is transitioned 2422 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2423 */ 2424 static void 2425 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2426 dtrace_specid_t which) 2427 { 2428 dtrace_speculation_t *spec; 2429 dtrace_speculation_state_t current, new; 2430 dtrace_buffer_t *buf; 2431 2432 if (which == 0) 2433 return; 2434 2435 if (which > state->dts_nspeculations) { 2436 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2437 return; 2438 } 2439 2440 spec = &state->dts_speculations[which - 1]; 2441 buf = &spec->dtsp_buffer[cpu]; 2442 2443 do { 2444 current = spec->dtsp_state; 2445 2446 switch (current) { 2447 case DTRACESPEC_INACTIVE: 2448 case DTRACESPEC_COMMITTINGMANY: 2449 case DTRACESPEC_COMMITTING: 2450 case DTRACESPEC_DISCARDING: 2451 return; 2452 2453 case DTRACESPEC_ACTIVE: 2454 case DTRACESPEC_ACTIVEMANY: 2455 new = DTRACESPEC_DISCARDING; 2456 break; 2457 2458 case DTRACESPEC_ACTIVEONE: 2459 if (buf->dtb_offset != 0) { 2460 new = DTRACESPEC_INACTIVE; 2461 } else { 2462 new = DTRACESPEC_DISCARDING; 2463 } 2464 break; 2465 2466 default: 2467 ASSERT(0); 2468 } 2469 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2470 current, new) != current); 2471 2472 buf->dtb_offset = 0; 2473 buf->dtb_drops = 0; 2474 } 2475 2476 /* 2477 * Note: not called from probe context. This function is called 2478 * asynchronously from cross call context to clean any speculations that are 2479 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2480 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2481 * speculation. 2482 */ 2483 static void 2484 dtrace_speculation_clean_here(dtrace_state_t *state) 2485 { 2486 dtrace_icookie_t cookie; 2487 processorid_t cpu = CPU->cpu_id; 2488 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2489 dtrace_specid_t i; 2490 2491 cookie = dtrace_interrupt_disable(); 2492 2493 if (dest->dtb_tomax == NULL) { 2494 dtrace_interrupt_enable(cookie); 2495 return; 2496 } 2497 2498 for (i = 0; i < state->dts_nspeculations; i++) { 2499 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2500 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2501 2502 if (src->dtb_tomax == NULL) 2503 continue; 2504 2505 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2506 src->dtb_offset = 0; 2507 continue; 2508 } 2509 2510 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2511 continue; 2512 2513 if (src->dtb_offset == 0) 2514 continue; 2515 2516 dtrace_speculation_commit(state, cpu, i + 1); 2517 } 2518 2519 dtrace_interrupt_enable(cookie); 2520 } 2521 2522 /* 2523 * Note: not called from probe context. This function is called 2524 * asynchronously (and at a regular interval) to clean any speculations that 2525 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2526 * is work to be done, it cross calls all CPUs to perform that work; 2527 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2528 * INACTIVE state until they have been cleaned by all CPUs. 2529 */ 2530 static void 2531 dtrace_speculation_clean(dtrace_state_t *state) 2532 { 2533 int work = 0, rv; 2534 dtrace_specid_t i; 2535 2536 for (i = 0; i < state->dts_nspeculations; i++) { 2537 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2538 2539 ASSERT(!spec->dtsp_cleaning); 2540 2541 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2542 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2543 continue; 2544 2545 work++; 2546 spec->dtsp_cleaning = 1; 2547 } 2548 2549 if (!work) 2550 return; 2551 2552 dtrace_xcall(DTRACE_CPUALL, 2553 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2554 2555 /* 2556 * We now know that all CPUs have committed or discarded their 2557 * speculation buffers, as appropriate. We can now set the state 2558 * to inactive. 2559 */ 2560 for (i = 0; i < state->dts_nspeculations; i++) { 2561 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2562 dtrace_speculation_state_t current, new; 2563 2564 if (!spec->dtsp_cleaning) 2565 continue; 2566 2567 current = spec->dtsp_state; 2568 ASSERT(current == DTRACESPEC_DISCARDING || 2569 current == DTRACESPEC_COMMITTINGMANY); 2570 2571 new = DTRACESPEC_INACTIVE; 2572 2573 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2574 ASSERT(rv == current); 2575 spec->dtsp_cleaning = 0; 2576 } 2577 } 2578 2579 /* 2580 * Called as part of a speculate() to get the speculative buffer associated 2581 * with a given speculation. Returns NULL if the specified speculation is not 2582 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2583 * the active CPU is not the specified CPU -- the speculation will be 2584 * atomically transitioned into the ACTIVEMANY state. 2585 */ 2586 static dtrace_buffer_t * 2587 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2588 dtrace_specid_t which) 2589 { 2590 dtrace_speculation_t *spec; 2591 dtrace_speculation_state_t current, new; 2592 dtrace_buffer_t *buf; 2593 2594 if (which == 0) 2595 return (NULL); 2596 2597 if (which > state->dts_nspeculations) { 2598 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2599 return (NULL); 2600 } 2601 2602 spec = &state->dts_speculations[which - 1]; 2603 buf = &spec->dtsp_buffer[cpuid]; 2604 2605 do { 2606 current = spec->dtsp_state; 2607 2608 switch (current) { 2609 case DTRACESPEC_INACTIVE: 2610 case DTRACESPEC_COMMITTINGMANY: 2611 case DTRACESPEC_DISCARDING: 2612 return (NULL); 2613 2614 case DTRACESPEC_COMMITTING: 2615 ASSERT(buf->dtb_offset == 0); 2616 return (NULL); 2617 2618 case DTRACESPEC_ACTIVEONE: 2619 /* 2620 * This speculation is currently active on one CPU. 2621 * Check the offset in the buffer; if it's non-zero, 2622 * that CPU must be us (and we leave the state alone). 2623 * If it's zero, assume that we're starting on a new 2624 * CPU -- and change the state to indicate that the 2625 * speculation is active on more than one CPU. 
2626 */ 2627 if (buf->dtb_offset != 0) 2628 return (buf); 2629 2630 new = DTRACESPEC_ACTIVEMANY; 2631 break; 2632 2633 case DTRACESPEC_ACTIVEMANY: 2634 return (buf); 2635 2636 case DTRACESPEC_ACTIVE: 2637 new = DTRACESPEC_ACTIVEONE; 2638 break; 2639 2640 default: 2641 ASSERT(0); 2642 } 2643 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2644 current, new) != current); 2645 2646 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2647 return (buf); 2648 } 2649 2650 /* 2651 * Return a string. In the event that the user lacks the privilege to access 2652 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2653 * don't fail access checking. 2654 * 2655 * dtrace_dif_variable() uses this routine as a helper for various 2656 * builtin values such as 'execname' and 'probefunc.' 2657 */ 2658 uintptr_t 2659 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2660 dtrace_mstate_t *mstate) 2661 { 2662 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2663 uintptr_t ret; 2664 size_t strsz; 2665 2666 /* 2667 * The easy case: this probe is allowed to read all of memory, so 2668 * we can just return this as a vanilla pointer. 2669 */ 2670 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2671 return (addr); 2672 2673 /* 2674 * This is the tougher case: we copy the string in question from 2675 * kernel memory into scratch memory and return it that way: this 2676 * ensures that we won't trip up when access checking tests the 2677 * BYREF return value. 2678 */ 2679 strsz = dtrace_strlen((char *)addr, size) + 1; 2680 2681 if (mstate->dtms_scratch_ptr + strsz > 2682 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2683 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2684 return (NULL); 2685 } 2686 2687 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2688 strsz); 2689 ret = mstate->dtms_scratch_ptr; 2690 mstate->dtms_scratch_ptr += strsz; 2691 return (ret); 2692 } 2693 2694 /* 2695 * This function implements the DIF emulator's variable lookups. The emulator 2696 * passes a reserved variable identifier and optional built-in array index. 2697 */ 2698 static uint64_t 2699 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2700 uint64_t ndx) 2701 { 2702 /* 2703 * If we're accessing one of the uncached arguments, we'll turn this 2704 * into a reference in the args array. 2705 */ 2706 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2707 ndx = v - DIF_VAR_ARG0; 2708 v = DIF_VAR_ARGS; 2709 } 2710 2711 switch (v) { 2712 case DIF_VAR_ARGS: 2713 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2714 if (ndx >= sizeof (mstate->dtms_arg) / 2715 sizeof (mstate->dtms_arg[0])) { 2716 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2717 dtrace_provider_t *pv; 2718 uint64_t val; 2719 2720 pv = mstate->dtms_probe->dtpr_provider; 2721 if (pv->dtpv_pops.dtps_getargval != NULL) 2722 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2723 mstate->dtms_probe->dtpr_id, 2724 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2725 else 2726 val = dtrace_getarg(ndx, aframes); 2727 2728 /* 2729 * This is regrettably required to keep the compiler 2730 * from tail-optimizing the call to dtrace_getarg(). 2731 * The condition always evaluates to true, but the 2732 * compiler has no way of figuring that out a priori. 2733 * (None of this would be necessary if the compiler 2734 * could be relied upon to _always_ tail-optimize 2735 * the call to dtrace_getarg() -- but it can't.) 
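 * (The check works, presumably, because the number of stack frames
 * sitting between the probe site and dtrace_getarg() -- the aframes
 * figure computed above -- must be predictable: by forcing the
 * compiler to keep this frame alive so that mstate->dtms_probe can be
 * tested after the call returns, the call can never become a tail
 * call and silently remove a frame from that count.)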
2736 */ 2737 if (mstate->dtms_probe != NULL) 2738 return (val); 2739 2740 ASSERT(0); 2741 } 2742 2743 return (mstate->dtms_arg[ndx]); 2744 2745 case DIF_VAR_UREGS: { 2746 klwp_t *lwp; 2747 2748 if (!dtrace_priv_proc(state)) 2749 return (0); 2750 2751 if ((lwp = curthread->t_lwp) == NULL) { 2752 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2753 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2754 return (0); 2755 } 2756 2757 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2758 } 2759 2760 case DIF_VAR_CURTHREAD: 2761 if (!dtrace_priv_kernel(state)) 2762 return (0); 2763 return ((uint64_t)(uintptr_t)curthread); 2764 2765 case DIF_VAR_TIMESTAMP: 2766 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2767 mstate->dtms_timestamp = dtrace_gethrtime(); 2768 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2769 } 2770 return (mstate->dtms_timestamp); 2771 2772 case DIF_VAR_VTIMESTAMP: 2773 ASSERT(dtrace_vtime_references != 0); 2774 return (curthread->t_dtrace_vtime); 2775 2776 case DIF_VAR_WALLTIMESTAMP: 2777 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2778 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2779 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2780 } 2781 return (mstate->dtms_walltimestamp); 2782 2783 case DIF_VAR_IPL: 2784 if (!dtrace_priv_kernel(state)) 2785 return (0); 2786 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2787 mstate->dtms_ipl = dtrace_getipl(); 2788 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2789 } 2790 return (mstate->dtms_ipl); 2791 2792 case DIF_VAR_EPID: 2793 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2794 return (mstate->dtms_epid); 2795 2796 case DIF_VAR_ID: 2797 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2798 return (mstate->dtms_probe->dtpr_id); 2799 2800 case DIF_VAR_STACKDEPTH: 2801 if (!dtrace_priv_kernel(state)) 2802 return (0); 2803 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2804 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2805 2806 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2807 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2808 } 2809 return (mstate->dtms_stackdepth); 2810 2811 case DIF_VAR_USTACKDEPTH: 2812 if (!dtrace_priv_proc(state)) 2813 return (0); 2814 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2815 /* 2816 * See comment in DIF_VAR_PID. 2817 */ 2818 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2819 CPU_ON_INTR(CPU)) { 2820 mstate->dtms_ustackdepth = 0; 2821 } else { 2822 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2823 mstate->dtms_ustackdepth = 2824 dtrace_getustackdepth(); 2825 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2826 } 2827 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2828 } 2829 return (mstate->dtms_ustackdepth); 2830 2831 case DIF_VAR_CALLER: 2832 if (!dtrace_priv_kernel(state)) 2833 return (0); 2834 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2835 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2836 2837 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2838 /* 2839 * If this is an unanchored probe, we are 2840 * required to go through the slow path: 2841 * dtrace_caller() only guarantees correct 2842 * results for anchored probes. 2843 */ 2844 pc_t caller[2]; 2845 2846 dtrace_getpcstack(caller, 2, aframes, 2847 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2848 mstate->dtms_caller = caller[1]; 2849 } else if ((mstate->dtms_caller = 2850 dtrace_caller(aframes)) == -1) { 2851 /* 2852 * We have failed to do this the quick way; 2853 * we must resort to the slower approach of 2854 * calling dtrace_getpcstack(). 
2855 */ 2856 pc_t caller; 2857 2858 dtrace_getpcstack(&caller, 1, aframes, NULL); 2859 mstate->dtms_caller = caller; 2860 } 2861 2862 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2863 } 2864 return (mstate->dtms_caller); 2865 2866 case DIF_VAR_UCALLER: 2867 if (!dtrace_priv_proc(state)) 2868 return (0); 2869 2870 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2871 uint64_t ustack[3]; 2872 2873 /* 2874 * dtrace_getupcstack() fills in the first uint64_t 2875 * with the current PID. The second uint64_t will 2876 * be the program counter at user-level. The third 2877 * uint64_t will contain the caller, which is what 2878 * we're after. 2879 */ 2880 ustack[2] = NULL; 2881 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2882 dtrace_getupcstack(ustack, 3); 2883 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2884 mstate->dtms_ucaller = ustack[2]; 2885 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2886 } 2887 2888 return (mstate->dtms_ucaller); 2889 2890 case DIF_VAR_PROBEPROV: 2891 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2892 return (dtrace_dif_varstr( 2893 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2894 state, mstate)); 2895 2896 case DIF_VAR_PROBEMOD: 2897 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2898 return (dtrace_dif_varstr( 2899 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2900 state, mstate)); 2901 2902 case DIF_VAR_PROBEFUNC: 2903 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2904 return (dtrace_dif_varstr( 2905 (uintptr_t)mstate->dtms_probe->dtpr_func, 2906 state, mstate)); 2907 2908 case DIF_VAR_PROBENAME: 2909 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2910 return (dtrace_dif_varstr( 2911 (uintptr_t)mstate->dtms_probe->dtpr_name, 2912 state, mstate)); 2913 2914 case DIF_VAR_PID: 2915 if (!dtrace_priv_proc(state)) 2916 return (0); 2917 2918 /* 2919 * Note that we are assuming that an unanchored probe is 2920 * always due to a high-level interrupt. (And we're assuming 2921 * that there is only a single high level interrupt.) 2922 */ 2923 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2924 return (pid0.pid_id); 2925 2926 /* 2927 * It is always safe to dereference one's own t_procp pointer: 2928 * it always points to a valid, allocated proc structure. 2929 * Further, it is always safe to dereference the p_pidp member 2930 * of one's own proc structure. (These are truisms becuase 2931 * threads and processes don't clean up their own state -- 2932 * they leave that task to whomever reaps them.) 2933 */ 2934 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2935 2936 case DIF_VAR_PPID: 2937 if (!dtrace_priv_proc(state)) 2938 return (0); 2939 2940 /* 2941 * See comment in DIF_VAR_PID. 2942 */ 2943 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2944 return (pid0.pid_id); 2945 2946 /* 2947 * It is always safe to dereference one's own t_procp pointer: 2948 * it always points to a valid, allocated proc structure. 2949 * (This is true because threads don't clean up their own 2950 * state -- they leave that task to whomever reaps them.) 2951 */ 2952 return ((uint64_t)curthread->t_procp->p_ppid); 2953 2954 case DIF_VAR_TID: 2955 /* 2956 * See comment in DIF_VAR_PID. 2957 */ 2958 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2959 return (0); 2960 2961 return ((uint64_t)curthread->t_tid); 2962 2963 case DIF_VAR_EXECNAME: 2964 if (!dtrace_priv_proc(state)) 2965 return (0); 2966 2967 /* 2968 * See comment in DIF_VAR_PID. 
2969 */ 2970 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2971 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2972 2973 /* 2974 * It is always safe to dereference one's own t_procp pointer: 2975 * it always points to a valid, allocated proc structure. 2976 * (This is true because threads don't clean up their own 2977 * state -- they leave that task to whomever reaps them.) 2978 */ 2979 return (dtrace_dif_varstr( 2980 (uintptr_t)curthread->t_procp->p_user.u_comm, 2981 state, mstate)); 2982 2983 case DIF_VAR_ZONENAME: 2984 if (!dtrace_priv_proc(state)) 2985 return (0); 2986 2987 /* 2988 * See comment in DIF_VAR_PID. 2989 */ 2990 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2991 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2992 2993 /* 2994 * It is always safe to dereference one's own t_procp pointer: 2995 * it always points to a valid, allocated proc structure. 2996 * (This is true because threads don't clean up their own 2997 * state -- they leave that task to whomever reaps them.) 2998 */ 2999 return (dtrace_dif_varstr( 3000 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3001 state, mstate)); 3002 3003 case DIF_VAR_UID: 3004 if (!dtrace_priv_proc(state)) 3005 return (0); 3006 3007 /* 3008 * See comment in DIF_VAR_PID. 3009 */ 3010 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3011 return ((uint64_t)p0.p_cred->cr_uid); 3012 3013 /* 3014 * It is always safe to dereference one's own t_procp pointer: 3015 * it always points to a valid, allocated proc structure. 3016 * (This is true because threads don't clean up their own 3017 * state -- they leave that task to whomever reaps them.) 3018 * 3019 * Additionally, it is safe to dereference one's own process 3020 * credential, since this is never NULL after process birth. 3021 */ 3022 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3023 3024 case DIF_VAR_GID: 3025 if (!dtrace_priv_proc(state)) 3026 return (0); 3027 3028 /* 3029 * See comment in DIF_VAR_PID. 3030 */ 3031 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3032 return ((uint64_t)p0.p_cred->cr_gid); 3033 3034 /* 3035 * It is always safe to dereference one's own t_procp pointer: 3036 * it always points to a valid, allocated proc structure. 3037 * (This is true because threads don't clean up their own 3038 * state -- they leave that task to whomever reaps them.) 3039 * 3040 * Additionally, it is safe to dereference one's own process 3041 * credential, since this is never NULL after process birth. 3042 */ 3043 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3044 3045 case DIF_VAR_ERRNO: { 3046 klwp_t *lwp; 3047 if (!dtrace_priv_proc(state)) 3048 return (0); 3049 3050 /* 3051 * See comment in DIF_VAR_PID. 3052 */ 3053 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3054 return (0); 3055 3056 /* 3057 * It is always safe to dereference one's own t_lwp pointer in 3058 * the event that this pointer is non-NULL. (This is true 3059 * because threads and lwps don't clean up their own state -- 3060 * they leave that task to whomever reaps them.) 3061 */ 3062 if ((lwp = curthread->t_lwp) == NULL) 3063 return (0); 3064 3065 return ((uint64_t)lwp->lwp_errno); 3066 } 3067 default: 3068 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3069 return (0); 3070 } 3071 } 3072 3073 /* 3074 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3075 * Notice that we don't bother validating the proper number of arguments or 3076 * their types in the tuple stack. 
This isn't needed because all argument 3077 * interpretation is safe because of our load safety -- the worst that can 3078 * happen is that a bogus program can obtain bogus results. 3079 */ 3080 static void 3081 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3082 dtrace_key_t *tupregs, int nargs, 3083 dtrace_mstate_t *mstate, dtrace_state_t *state) 3084 { 3085 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3086 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3087 dtrace_vstate_t *vstate = &state->dts_vstate; 3088 3089 union { 3090 mutex_impl_t mi; 3091 uint64_t mx; 3092 } m; 3093 3094 union { 3095 krwlock_t ri; 3096 uintptr_t rw; 3097 } r; 3098 3099 switch (subr) { 3100 case DIF_SUBR_RAND: 3101 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3102 break; 3103 3104 case DIF_SUBR_MUTEX_OWNED: 3105 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3106 mstate, vstate)) { 3107 regs[rd] = NULL; 3108 break; 3109 } 3110 3111 m.mx = dtrace_load64(tupregs[0].dttk_value); 3112 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3113 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3114 else 3115 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3116 break; 3117 3118 case DIF_SUBR_MUTEX_OWNER: 3119 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3120 mstate, vstate)) { 3121 regs[rd] = NULL; 3122 break; 3123 } 3124 3125 m.mx = dtrace_load64(tupregs[0].dttk_value); 3126 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3127 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3128 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3129 else 3130 regs[rd] = 0; 3131 break; 3132 3133 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3134 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3135 mstate, vstate)) { 3136 regs[rd] = NULL; 3137 break; 3138 } 3139 3140 m.mx = dtrace_load64(tupregs[0].dttk_value); 3141 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3142 break; 3143 3144 case DIF_SUBR_MUTEX_TYPE_SPIN: 3145 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3146 mstate, vstate)) { 3147 regs[rd] = NULL; 3148 break; 3149 } 3150 3151 m.mx = dtrace_load64(tupregs[0].dttk_value); 3152 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3153 break; 3154 3155 case DIF_SUBR_RW_READ_HELD: { 3156 uintptr_t tmp; 3157 3158 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3159 mstate, vstate)) { 3160 regs[rd] = NULL; 3161 break; 3162 } 3163 3164 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3165 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3166 break; 3167 } 3168 3169 case DIF_SUBR_RW_WRITE_HELD: 3170 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3171 mstate, vstate)) { 3172 regs[rd] = NULL; 3173 break; 3174 } 3175 3176 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3177 regs[rd] = _RW_WRITE_HELD(&r.ri); 3178 break; 3179 3180 case DIF_SUBR_RW_ISWRITER: 3181 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3182 mstate, vstate)) { 3183 regs[rd] = NULL; 3184 break; 3185 } 3186 3187 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3188 regs[rd] = _RW_ISWRITER(&r.ri); 3189 break; 3190 3191 case DIF_SUBR_BCOPY: { 3192 /* 3193 * We need to be sure that the destination is in the scratch 3194 * region -- no other region is allowed. 
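 *
 * (In D terms, the destination is typically the result of a prior
 * alloca() or copyin() -- both of which, as the corresponding cases
 * below show, carve their allocations out of this same per-CPU
 * scratch region.  A destination anywhere else is refused with
 * CPU_DTRACE_BADADDR.)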
3195 */ 3196 uintptr_t src = tupregs[0].dttk_value; 3197 uintptr_t dest = tupregs[1].dttk_value; 3198 size_t size = tupregs[2].dttk_value; 3199 3200 if (!dtrace_inscratch(dest, size, mstate)) { 3201 *flags |= CPU_DTRACE_BADADDR; 3202 *illval = regs[rd]; 3203 break; 3204 } 3205 3206 if (!dtrace_canload(src, size, mstate, vstate)) { 3207 regs[rd] = NULL; 3208 break; 3209 } 3210 3211 dtrace_bcopy((void *)src, (void *)dest, size); 3212 break; 3213 } 3214 3215 case DIF_SUBR_ALLOCA: 3216 case DIF_SUBR_COPYIN: { 3217 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3218 uint64_t size = 3219 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3220 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3221 3222 /* 3223 * This action doesn't require any credential checks since 3224 * probes will not activate in user contexts to which the 3225 * enabling user does not have permissions. 3226 */ 3227 3228 /* 3229 * Rounding up the user allocation size could have overflowed 3230 * a large, bogus allocation (like -1ULL) to 0. 3231 */ 3232 if (scratch_size < size || 3233 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3234 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3235 regs[rd] = NULL; 3236 break; 3237 } 3238 3239 if (subr == DIF_SUBR_COPYIN) { 3240 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3241 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3242 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3243 } 3244 3245 mstate->dtms_scratch_ptr += scratch_size; 3246 regs[rd] = dest; 3247 break; 3248 } 3249 3250 case DIF_SUBR_COPYINTO: { 3251 uint64_t size = tupregs[1].dttk_value; 3252 uintptr_t dest = tupregs[2].dttk_value; 3253 3254 /* 3255 * This action doesn't require any credential checks since 3256 * probes will not activate in user contexts to which the 3257 * enabling user does not have permissions. 3258 */ 3259 if (!dtrace_inscratch(dest, size, mstate)) { 3260 *flags |= CPU_DTRACE_BADADDR; 3261 *illval = regs[rd]; 3262 break; 3263 } 3264 3265 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3266 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3267 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3268 break; 3269 } 3270 3271 case DIF_SUBR_COPYINSTR: { 3272 uintptr_t dest = mstate->dtms_scratch_ptr; 3273 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3274 3275 if (nargs > 1 && tupregs[1].dttk_value < size) 3276 size = tupregs[1].dttk_value + 1; 3277 3278 /* 3279 * This action doesn't require any credential checks since 3280 * probes will not activate in user contexts to which the 3281 * enabling user does not have permissions. 
3282 */ 3283 if (!DTRACE_INSCRATCH(mstate, size)) { 3284 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3285 regs[rd] = NULL; 3286 break; 3287 } 3288 3289 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3290 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3291 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3292 3293 ((char *)dest)[size - 1] = '\0'; 3294 mstate->dtms_scratch_ptr += size; 3295 regs[rd] = dest; 3296 break; 3297 } 3298 3299 case DIF_SUBR_MSGSIZE: 3300 case DIF_SUBR_MSGDSIZE: { 3301 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3302 uintptr_t wptr, rptr; 3303 size_t count = 0; 3304 int cont = 0; 3305 3306 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3307 3308 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3309 vstate)) { 3310 regs[rd] = NULL; 3311 break; 3312 } 3313 3314 wptr = dtrace_loadptr(baddr + 3315 offsetof(mblk_t, b_wptr)); 3316 3317 rptr = dtrace_loadptr(baddr + 3318 offsetof(mblk_t, b_rptr)); 3319 3320 if (wptr < rptr) { 3321 *flags |= CPU_DTRACE_BADADDR; 3322 *illval = tupregs[0].dttk_value; 3323 break; 3324 } 3325 3326 daddr = dtrace_loadptr(baddr + 3327 offsetof(mblk_t, b_datap)); 3328 3329 baddr = dtrace_loadptr(baddr + 3330 offsetof(mblk_t, b_cont)); 3331 3332 /* 3333 * We want to prevent against denial-of-service here, 3334 * so we're only going to search the list for 3335 * dtrace_msgdsize_max mblks. 3336 */ 3337 if (cont++ > dtrace_msgdsize_max) { 3338 *flags |= CPU_DTRACE_ILLOP; 3339 break; 3340 } 3341 3342 if (subr == DIF_SUBR_MSGDSIZE) { 3343 if (dtrace_load8(daddr + 3344 offsetof(dblk_t, db_type)) != M_DATA) 3345 continue; 3346 } 3347 3348 count += wptr - rptr; 3349 } 3350 3351 if (!(*flags & CPU_DTRACE_FAULT)) 3352 regs[rd] = count; 3353 3354 break; 3355 } 3356 3357 case DIF_SUBR_PROGENYOF: { 3358 pid_t pid = tupregs[0].dttk_value; 3359 proc_t *p; 3360 int rval = 0; 3361 3362 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3363 3364 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3365 if (p->p_pidp->pid_id == pid) { 3366 rval = 1; 3367 break; 3368 } 3369 } 3370 3371 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3372 3373 regs[rd] = rval; 3374 break; 3375 } 3376 3377 case DIF_SUBR_SPECULATION: 3378 regs[rd] = dtrace_speculation(state); 3379 break; 3380 3381 case DIF_SUBR_COPYOUT: { 3382 uintptr_t kaddr = tupregs[0].dttk_value; 3383 uintptr_t uaddr = tupregs[1].dttk_value; 3384 uint64_t size = tupregs[2].dttk_value; 3385 3386 if (!dtrace_destructive_disallow && 3387 dtrace_priv_proc_control(state) && 3388 !dtrace_istoxic(kaddr, size)) { 3389 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3390 dtrace_copyout(kaddr, uaddr, size, flags); 3391 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3392 } 3393 break; 3394 } 3395 3396 case DIF_SUBR_COPYOUTSTR: { 3397 uintptr_t kaddr = tupregs[0].dttk_value; 3398 uintptr_t uaddr = tupregs[1].dttk_value; 3399 uint64_t size = tupregs[2].dttk_value; 3400 3401 if (!dtrace_destructive_disallow && 3402 dtrace_priv_proc_control(state) && 3403 !dtrace_istoxic(kaddr, size)) { 3404 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3405 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3406 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3407 } 3408 break; 3409 } 3410 3411 case DIF_SUBR_STRLEN: { 3412 size_t sz; 3413 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3414 sz = dtrace_strlen((char *)addr, 3415 state->dts_options[DTRACEOPT_STRSIZE]); 3416 3417 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3418 regs[rd] = NULL; 3419 break; 3420 } 3421 3422 regs[rd] = sz; 3423 3424 break; 3425 } 3426 3427 case DIF_SUBR_STRCHR: 3428 case 
DIF_SUBR_STRRCHR: { 3429 /* 3430 * We're going to iterate over the string looking for the 3431 * specified character. We will iterate until we have reached 3432 * the string length or we have found the character. If this 3433 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3434 * of the specified character instead of the first. 3435 */ 3436 uintptr_t saddr = tupregs[0].dttk_value; 3437 uintptr_t addr = tupregs[0].dttk_value; 3438 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3439 char c, target = (char)tupregs[1].dttk_value; 3440 3441 for (regs[rd] = NULL; addr < limit; addr++) { 3442 if ((c = dtrace_load8(addr)) == target) { 3443 regs[rd] = addr; 3444 3445 if (subr == DIF_SUBR_STRCHR) 3446 break; 3447 } 3448 3449 if (c == '\0') 3450 break; 3451 } 3452 3453 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3454 regs[rd] = NULL; 3455 break; 3456 } 3457 3458 break; 3459 } 3460 3461 case DIF_SUBR_STRSTR: 3462 case DIF_SUBR_INDEX: 3463 case DIF_SUBR_RINDEX: { 3464 /* 3465 * We're going to iterate over the string looking for the 3466 * specified string. We will iterate until we have reached 3467 * the string length or we have found the string. (Yes, this 3468 * is done in the most naive way possible -- but considering 3469 * that the string we're searching for is likely to be 3470 * relatively short, the complexity of Rabin-Karp or similar 3471 * hardly seems merited.) 3472 */ 3473 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3474 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3475 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3476 size_t len = dtrace_strlen(addr, size); 3477 size_t sublen = dtrace_strlen(substr, size); 3478 char *limit = addr + len, *orig = addr; 3479 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3480 int inc = 1; 3481 3482 regs[rd] = notfound; 3483 3484 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3485 regs[rd] = NULL; 3486 break; 3487 } 3488 3489 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3490 vstate)) { 3491 regs[rd] = NULL; 3492 break; 3493 } 3494 3495 /* 3496 * strstr() and index()/rindex() have similar semantics if 3497 * both strings are the empty string: strstr() returns a 3498 * pointer to the (empty) string, and index() and rindex() 3499 * both return index 0 (regardless of any position argument). 3500 */ 3501 if (sublen == 0 && len == 0) { 3502 if (subr == DIF_SUBR_STRSTR) 3503 regs[rd] = (uintptr_t)addr; 3504 else 3505 regs[rd] = 0; 3506 break; 3507 } 3508 3509 if (subr != DIF_SUBR_STRSTR) { 3510 if (subr == DIF_SUBR_RINDEX) { 3511 limit = orig - 1; 3512 addr += len; 3513 inc = -1; 3514 } 3515 3516 /* 3517 * Both index() and rindex() take an optional position 3518 * argument that denotes the starting position. 3519 */ 3520 if (nargs == 3) { 3521 int64_t pos = (int64_t)tupregs[2].dttk_value; 3522 3523 /* 3524 * If the position argument to index() is 3525 * negative, Perl implicitly clamps it at 3526 * zero. This semantic is a little surprising 3527 * given the special meaning of negative 3528 * positions to similar Perl functions like 3529 * substr(), but it appears to reflect a 3530 * notion that index() can start from a 3531 * negative index and increment its way up to 3532 * the string. Given this notion, Perl's 3533 * rindex() is at least self-consistent in 3534 * that it implicitly clamps positions greater 3535 * than the string length to be the string 3536 * length. 
Where Perl completely loses 3537 * coherence, however, is when the specified 3538 * substring is the empty string (""). In 3539 * this case, even if the position is 3540 * negative, rindex() returns 0 -- and even if 3541 * the position is greater than the length, 3542 * index() returns the string length. These 3543 * semantics violate the notion that index() 3544 * should never return a value less than the 3545 * specified position and that rindex() should 3546 * never return a value greater than the 3547 * specified position. (One assumes that 3548 * these semantics are artifacts of Perl's 3549 * implementation and not the results of 3550 * deliberate design -- it beggars belief that 3551 * even Larry Wall could desire such oddness.) 3552 * While in the abstract one would wish for 3553 * consistent position semantics across 3554 * substr(), index() and rindex() -- or at the 3555 * very least self-consistent position 3556 * semantics for index() and rindex() -- we 3557 * instead opt to keep with the extant Perl 3558 * semantics, in all their broken glory. (Do 3559 * we have more desire to maintain Perl's 3560 * semantics than Perl does? Probably.) 3561 */ 3562 if (subr == DIF_SUBR_RINDEX) { 3563 if (pos < 0) { 3564 if (sublen == 0) 3565 regs[rd] = 0; 3566 break; 3567 } 3568 3569 if (pos > len) 3570 pos = len; 3571 } else { 3572 if (pos < 0) 3573 pos = 0; 3574 3575 if (pos >= len) { 3576 if (sublen == 0) 3577 regs[rd] = len; 3578 break; 3579 } 3580 } 3581 3582 addr = orig + pos; 3583 } 3584 } 3585 3586 for (regs[rd] = notfound; addr != limit; addr += inc) { 3587 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3588 if (subr != DIF_SUBR_STRSTR) { 3589 /* 3590 * As D index() and rindex() are 3591 * modeled on Perl (and not on awk), 3592 * we return a zero-based (and not a 3593 * one-based) index. (For you Perl 3594 * weenies: no, we're not going to add 3595 * $[ -- and shouldn't you be at a con 3596 * or something?) 3597 */ 3598 regs[rd] = (uintptr_t)(addr - orig); 3599 break; 3600 } 3601 3602 ASSERT(subr == DIF_SUBR_STRSTR); 3603 regs[rd] = (uintptr_t)addr; 3604 break; 3605 } 3606 } 3607 3608 break; 3609 } 3610 3611 case DIF_SUBR_STRTOK: { 3612 uintptr_t addr = tupregs[0].dttk_value; 3613 uintptr_t tokaddr = tupregs[1].dttk_value; 3614 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3615 uintptr_t limit, toklimit = tokaddr + size; 3616 uint8_t c, tokmap[32]; /* 256 / 8 */ 3617 char *dest = (char *)mstate->dtms_scratch_ptr; 3618 int i; 3619 3620 /* 3621 * Check both the token buffer and (later) the input buffer, 3622 * since both could be non-scratch addresses. 3623 */ 3624 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3625 regs[rd] = NULL; 3626 break; 3627 } 3628 3629 if (!DTRACE_INSCRATCH(mstate, size)) { 3630 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3631 regs[rd] = NULL; 3632 break; 3633 } 3634 3635 if (addr == NULL) { 3636 /* 3637 * If the address specified is NULL, we use our saved 3638 * strtok pointer from the mstate. Note that this 3639 * means that the saved strtok pointer is _only_ 3640 * valid within multiple enablings of the same probe -- 3641 * it behaves like an implicit clause-local variable. 3642 */ 3643 addr = mstate->dtms_strtok; 3644 } else { 3645 /* 3646 * If the user-specified address is non-NULL we must 3647 * access check it. 
This is the only time we have 3648 * a chance to do so, since this address may reside 3649 * in the string table of this clause-- future calls 3650 * (when we fetch addr from mstate->dtms_strtok) 3651 * would fail this access check. 3652 */ 3653 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3654 regs[rd] = NULL; 3655 break; 3656 } 3657 } 3658 3659 /* 3660 * First, zero the token map, and then process the token 3661 * string -- setting a bit in the map for every character 3662 * found in the token string. 3663 */ 3664 for (i = 0; i < sizeof (tokmap); i++) 3665 tokmap[i] = 0; 3666 3667 for (; tokaddr < toklimit; tokaddr++) { 3668 if ((c = dtrace_load8(tokaddr)) == '\0') 3669 break; 3670 3671 ASSERT((c >> 3) < sizeof (tokmap)); 3672 tokmap[c >> 3] |= (1 << (c & 0x7)); 3673 } 3674 3675 for (limit = addr + size; addr < limit; addr++) { 3676 /* 3677 * We're looking for a character that is _not_ contained 3678 * in the token string. 3679 */ 3680 if ((c = dtrace_load8(addr)) == '\0') 3681 break; 3682 3683 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3684 break; 3685 } 3686 3687 if (c == '\0') { 3688 /* 3689 * We reached the end of the string without finding 3690 * any character that was not in the token string. 3691 * We return NULL in this case, and we set the saved 3692 * address to NULL as well. 3693 */ 3694 regs[rd] = NULL; 3695 mstate->dtms_strtok = NULL; 3696 break; 3697 } 3698 3699 /* 3700 * From here on, we're copying into the destination string. 3701 */ 3702 for (i = 0; addr < limit && i < size - 1; addr++) { 3703 if ((c = dtrace_load8(addr)) == '\0') 3704 break; 3705 3706 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3707 break; 3708 3709 ASSERT(i < size); 3710 dest[i++] = c; 3711 } 3712 3713 ASSERT(i < size); 3714 dest[i] = '\0'; 3715 regs[rd] = (uintptr_t)dest; 3716 mstate->dtms_scratch_ptr += size; 3717 mstate->dtms_strtok = addr; 3718 break; 3719 } 3720 3721 case DIF_SUBR_SUBSTR: { 3722 uintptr_t s = tupregs[0].dttk_value; 3723 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3724 char *d = (char *)mstate->dtms_scratch_ptr; 3725 int64_t index = (int64_t)tupregs[1].dttk_value; 3726 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3727 size_t len = dtrace_strlen((char *)s, size); 3728 int64_t i; 3729 3730 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3731 regs[rd] = NULL; 3732 break; 3733 } 3734 3735 if (!DTRACE_INSCRATCH(mstate, size)) { 3736 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3737 regs[rd] = NULL; 3738 break; 3739 } 3740 3741 if (nargs <= 2) 3742 remaining = (int64_t)size; 3743 3744 if (index < 0) { 3745 index += len; 3746 3747 if (index < 0 && index + remaining > 0) { 3748 remaining += index; 3749 index = 0; 3750 } 3751 } 3752 3753 if (index >= len || index < 0) { 3754 remaining = 0; 3755 } else if (remaining < 0) { 3756 remaining += len - index; 3757 } else if (index + remaining > size) { 3758 remaining = size - index; 3759 } 3760 3761 for (i = 0; i < remaining; i++) { 3762 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3763 break; 3764 } 3765 3766 d[i] = '\0'; 3767 3768 mstate->dtms_scratch_ptr += size; 3769 regs[rd] = (uintptr_t)d; 3770 break; 3771 } 3772 3773 case DIF_SUBR_GETMAJOR: 3774 #ifdef _LP64 3775 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3776 #else 3777 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3778 #endif 3779 break; 3780 3781 case DIF_SUBR_GETMINOR: 3782 #ifdef _LP64 3783 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3784 #else 3785 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3786 #endif 3787 break; 3788 3789 
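	/*
	 * To make the substr() emulation above concrete with a couple of
	 * purely hypothetical inputs (assuming the default string size):
	 * substr("hello", 1, 3) copies three bytes starting at index 1
	 * and yields "ell", while substr("hello", -3) first normalizes
	 * the negative index to (-3 + 5) = 2 and then copies out to the
	 * terminating NUL, yielding "llo".
	 */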
case DIF_SUBR_DDI_PATHNAME: { 3790 /* 3791 * This one is a galactic mess. We are going to roughly 3792 * emulate ddi_pathname(), but it's made more complicated 3793 * by the fact that we (a) want to include the minor name and 3794 * (b) must proceed iteratively instead of recursively. 3795 */ 3796 uintptr_t dest = mstate->dtms_scratch_ptr; 3797 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3798 char *start = (char *)dest, *end = start + size - 1; 3799 uintptr_t daddr = tupregs[0].dttk_value; 3800 int64_t minor = (int64_t)tupregs[1].dttk_value; 3801 char *s; 3802 int i, len, depth = 0; 3803 3804 /* 3805 * Due to all the pointer jumping we do and context we must 3806 * rely upon, we just mandate that the user must have kernel 3807 * read privileges to use this routine. 3808 */ 3809 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3810 *flags |= CPU_DTRACE_KPRIV; 3811 *illval = daddr; 3812 regs[rd] = NULL; 3813 } 3814 3815 if (!DTRACE_INSCRATCH(mstate, size)) { 3816 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3817 regs[rd] = NULL; 3818 break; 3819 } 3820 3821 *end = '\0'; 3822 3823 /* 3824 * We want to have a name for the minor. In order to do this, 3825 * we need to walk the minor list from the devinfo. We want 3826 * to be sure that we don't infinitely walk a circular list, 3827 * so we check for circularity by sending a scout pointer 3828 * ahead two elements for every element that we iterate over; 3829 * if the list is circular, these will ultimately point to the 3830 * same element. You may recognize this little trick as the 3831 * answer to a stupid interview question -- one that always 3832 * seems to be asked by those who had to have it laboriously 3833 * explained to them, and who can't even concisely describe 3834 * the conditions under which one would be forced to resort to 3835 * this technique. Needless to say, those conditions are 3836 * found here -- and probably only here. Is this the only use 3837 * of this infamous trick in shipping, production code? If it 3838 * isn't, it probably should be... 3839 */ 3840 if (minor != -1) { 3841 uintptr_t maddr = dtrace_loadptr(daddr + 3842 offsetof(struct dev_info, devi_minor)); 3843 3844 uintptr_t next = offsetof(struct ddi_minor_data, next); 3845 uintptr_t name = offsetof(struct ddi_minor_data, 3846 d_minor) + offsetof(struct ddi_minor, name); 3847 uintptr_t dev = offsetof(struct ddi_minor_data, 3848 d_minor) + offsetof(struct ddi_minor, dev); 3849 uintptr_t scout; 3850 3851 if (maddr != NULL) 3852 scout = dtrace_loadptr(maddr + next); 3853 3854 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3855 uint64_t m; 3856 #ifdef _LP64 3857 m = dtrace_load64(maddr + dev) & MAXMIN64; 3858 #else 3859 m = dtrace_load32(maddr + dev) & MAXMIN; 3860 #endif 3861 if (m != minor) { 3862 maddr = dtrace_loadptr(maddr + next); 3863 3864 if (scout == NULL) 3865 continue; 3866 3867 scout = dtrace_loadptr(scout + next); 3868 3869 if (scout == NULL) 3870 continue; 3871 3872 scout = dtrace_loadptr(scout + next); 3873 3874 if (scout == NULL) 3875 continue; 3876 3877 if (scout == maddr) { 3878 *flags |= CPU_DTRACE_ILLOP; 3879 break; 3880 } 3881 3882 continue; 3883 } 3884 3885 /* 3886 * We have the minor data. Now we need to 3887 * copy the minor's name into the end of the 3888 * pathname. 
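 * (As with the rest of this routine, the copy is performed right to
 * left: end begins at the last byte of the scratch allocation and is
 * walked backwards as each ":minor-name", "@unit-address" and
 * "/node-name" component is prepended, which is why the finished
 * pathname is ultimately returned as end rather than start.)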
3889 */ 3890 s = (char *)dtrace_loadptr(maddr + name); 3891 len = dtrace_strlen(s, size); 3892 3893 if (*flags & CPU_DTRACE_FAULT) 3894 break; 3895 3896 if (len != 0) { 3897 if ((end -= (len + 1)) < start) 3898 break; 3899 3900 *end = ':'; 3901 } 3902 3903 for (i = 1; i <= len; i++) 3904 end[i] = dtrace_load8((uintptr_t)s++); 3905 break; 3906 } 3907 } 3908 3909 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3910 ddi_node_state_t devi_state; 3911 3912 devi_state = dtrace_load32(daddr + 3913 offsetof(struct dev_info, devi_node_state)); 3914 3915 if (*flags & CPU_DTRACE_FAULT) 3916 break; 3917 3918 if (devi_state >= DS_INITIALIZED) { 3919 s = (char *)dtrace_loadptr(daddr + 3920 offsetof(struct dev_info, devi_addr)); 3921 len = dtrace_strlen(s, size); 3922 3923 if (*flags & CPU_DTRACE_FAULT) 3924 break; 3925 3926 if (len != 0) { 3927 if ((end -= (len + 1)) < start) 3928 break; 3929 3930 *end = '@'; 3931 } 3932 3933 for (i = 1; i <= len; i++) 3934 end[i] = dtrace_load8((uintptr_t)s++); 3935 } 3936 3937 /* 3938 * Now for the node name... 3939 */ 3940 s = (char *)dtrace_loadptr(daddr + 3941 offsetof(struct dev_info, devi_node_name)); 3942 3943 daddr = dtrace_loadptr(daddr + 3944 offsetof(struct dev_info, devi_parent)); 3945 3946 /* 3947 * If our parent is NULL (that is, if we're the root 3948 * node), we're going to use the special path 3949 * "devices". 3950 */ 3951 if (daddr == NULL) 3952 s = "devices"; 3953 3954 len = dtrace_strlen(s, size); 3955 if (*flags & CPU_DTRACE_FAULT) 3956 break; 3957 3958 if ((end -= (len + 1)) < start) 3959 break; 3960 3961 for (i = 1; i <= len; i++) 3962 end[i] = dtrace_load8((uintptr_t)s++); 3963 *end = '/'; 3964 3965 if (depth++ > dtrace_devdepth_max) { 3966 *flags |= CPU_DTRACE_ILLOP; 3967 break; 3968 } 3969 } 3970 3971 if (end < start) 3972 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3973 3974 if (daddr == NULL) { 3975 regs[rd] = (uintptr_t)end; 3976 mstate->dtms_scratch_ptr += size; 3977 } 3978 3979 break; 3980 } 3981 3982 case DIF_SUBR_STRJOIN: { 3983 char *d = (char *)mstate->dtms_scratch_ptr; 3984 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3985 uintptr_t s1 = tupregs[0].dttk_value; 3986 uintptr_t s2 = tupregs[1].dttk_value; 3987 int i = 0; 3988 3989 if (!dtrace_strcanload(s1, size, mstate, vstate) || 3990 !dtrace_strcanload(s2, size, mstate, vstate)) { 3991 regs[rd] = NULL; 3992 break; 3993 } 3994 3995 if (!DTRACE_INSCRATCH(mstate, size)) { 3996 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3997 regs[rd] = NULL; 3998 break; 3999 } 4000 4001 for (;;) { 4002 if (i >= size) { 4003 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4004 regs[rd] = NULL; 4005 break; 4006 } 4007 4008 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4009 i--; 4010 break; 4011 } 4012 } 4013 4014 for (;;) { 4015 if (i >= size) { 4016 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4017 regs[rd] = NULL; 4018 break; 4019 } 4020 4021 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4022 break; 4023 } 4024 4025 if (i < size) { 4026 mstate->dtms_scratch_ptr += i; 4027 regs[rd] = (uintptr_t)d; 4028 } 4029 4030 break; 4031 } 4032 4033 case DIF_SUBR_LLTOSTR: { 4034 int64_t i = (int64_t)tupregs[0].dttk_value; 4035 int64_t val = i < 0 ? 
i * -1 : i; 4036 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4037 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4038 4039 if (!DTRACE_INSCRATCH(mstate, size)) { 4040 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4041 regs[rd] = NULL; 4042 break; 4043 } 4044 4045 for (*end-- = '\0'; val; val /= 10) 4046 *end-- = '0' + (val % 10); 4047 4048 if (i == 0) 4049 *end-- = '0'; 4050 4051 if (i < 0) 4052 *end-- = '-'; 4053 4054 regs[rd] = (uintptr_t)end + 1; 4055 mstate->dtms_scratch_ptr += size; 4056 break; 4057 } 4058 4059 case DIF_SUBR_HTONS: 4060 case DIF_SUBR_NTOHS: 4061 #ifdef _BIG_ENDIAN 4062 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4063 #else 4064 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4065 #endif 4066 break; 4067 4068 4069 case DIF_SUBR_HTONL: 4070 case DIF_SUBR_NTOHL: 4071 #ifdef _BIG_ENDIAN 4072 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4073 #else 4074 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4075 #endif 4076 break; 4077 4078 4079 case DIF_SUBR_HTONLL: 4080 case DIF_SUBR_NTOHLL: 4081 #ifdef _BIG_ENDIAN 4082 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4083 #else 4084 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4085 #endif 4086 break; 4087 4088 4089 case DIF_SUBR_DIRNAME: 4090 case DIF_SUBR_BASENAME: { 4091 char *dest = (char *)mstate->dtms_scratch_ptr; 4092 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4093 uintptr_t src = tupregs[0].dttk_value; 4094 int i, j, len = dtrace_strlen((char *)src, size); 4095 int lastbase = -1, firstbase = -1, lastdir = -1; 4096 int start, end; 4097 4098 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4099 regs[rd] = NULL; 4100 break; 4101 } 4102 4103 if (!DTRACE_INSCRATCH(mstate, size)) { 4104 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4105 regs[rd] = NULL; 4106 break; 4107 } 4108 4109 /* 4110 * The basename and dirname for a zero-length string is 4111 * defined to be "." 4112 */ 4113 if (len == 0) { 4114 len = 1; 4115 src = (uintptr_t)"."; 4116 } 4117 4118 /* 4119 * Start from the back of the string, moving back toward the 4120 * front until we see a character that isn't a slash. That 4121 * character is the last character in the basename. 4122 */ 4123 for (i = len - 1; i >= 0; i--) { 4124 if (dtrace_load8(src + i) != '/') 4125 break; 4126 } 4127 4128 if (i >= 0) 4129 lastbase = i; 4130 4131 /* 4132 * Starting from the last character in the basename, move 4133 * towards the front until we find a slash. The character 4134 * that we processed immediately before that is the first 4135 * character in the basename. 4136 */ 4137 for (; i >= 0; i--) { 4138 if (dtrace_load8(src + i) == '/') 4139 break; 4140 } 4141 4142 if (i >= 0) 4143 firstbase = i + 1; 4144 4145 /* 4146 * Now keep going until we find a non-slash character. That 4147 * character is the last character in the dirname. 4148 */ 4149 for (; i >= 0; i--) { 4150 if (dtrace_load8(src + i) != '/') 4151 break; 4152 } 4153 4154 if (i >= 0) 4155 lastdir = i; 4156 4157 ASSERT(!(lastbase == -1 && firstbase != -1)); 4158 ASSERT(!(firstbase == -1 && lastdir != -1)); 4159 4160 if (lastbase == -1) { 4161 /* 4162 * We didn't find a non-slash character. We know that 4163 * the length is non-zero, so the whole string must be 4164 * slashes. In either the dirname or the basename 4165 * case, we return '/'. 4166 */ 4167 ASSERT(firstbase == -1); 4168 firstbase = lastbase = lastdir = 0; 4169 } 4170 4171 if (firstbase == -1) { 4172 /* 4173 * The entire string consists only of a basename 4174 * component. 
If we're looking for dirname, we need 4175 * to change our string to be just "."; if we're 4176 * looking for a basename, we'll just set the first 4177 * character of the basename to be 0. 4178 */ 4179 if (subr == DIF_SUBR_DIRNAME) { 4180 ASSERT(lastdir == -1); 4181 src = (uintptr_t)"."; 4182 lastdir = 0; 4183 } else { 4184 firstbase = 0; 4185 } 4186 } 4187 4188 if (subr == DIF_SUBR_DIRNAME) { 4189 if (lastdir == -1) { 4190 /* 4191 * We know that we have a slash in the name -- 4192 * or lastdir would be set to 0, above. And 4193 * because lastdir is -1, we know that this 4194 * slash must be the first character. (That 4195 * is, the full string must be of the form 4196 * "/basename".) In this case, the last 4197 * character of the directory name is 0. 4198 */ 4199 lastdir = 0; 4200 } 4201 4202 start = 0; 4203 end = lastdir; 4204 } else { 4205 ASSERT(subr == DIF_SUBR_BASENAME); 4206 ASSERT(firstbase != -1 && lastbase != -1); 4207 start = firstbase; 4208 end = lastbase; 4209 } 4210 4211 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4212 dest[j] = dtrace_load8(src + i); 4213 4214 dest[j] = '\0'; 4215 regs[rd] = (uintptr_t)dest; 4216 mstate->dtms_scratch_ptr += size; 4217 break; 4218 } 4219 4220 case DIF_SUBR_CLEANPATH: { 4221 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4222 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4223 uintptr_t src = tupregs[0].dttk_value; 4224 int i = 0, j = 0; 4225 4226 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4227 regs[rd] = NULL; 4228 break; 4229 } 4230 4231 if (!DTRACE_INSCRATCH(mstate, size)) { 4232 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4233 regs[rd] = NULL; 4234 break; 4235 } 4236 4237 /* 4238 * Move forward, loading each character. 4239 */ 4240 do { 4241 c = dtrace_load8(src + i++); 4242 next: 4243 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4244 break; 4245 4246 if (c != '/') { 4247 dest[j++] = c; 4248 continue; 4249 } 4250 4251 c = dtrace_load8(src + i++); 4252 4253 if (c == '/') { 4254 /* 4255 * We have two slashes -- we can just advance 4256 * to the next character. 4257 */ 4258 goto next; 4259 } 4260 4261 if (c != '.') { 4262 /* 4263 * This is not "." and it's not ".." -- we can 4264 * just store the "/" and this character and 4265 * drive on. 4266 */ 4267 dest[j++] = '/'; 4268 dest[j++] = c; 4269 continue; 4270 } 4271 4272 c = dtrace_load8(src + i++); 4273 4274 if (c == '/') { 4275 /* 4276 * This is a "/./" component. We're not going 4277 * to store anything in the destination buffer; 4278 * we're just going to go to the next component. 4279 */ 4280 goto next; 4281 } 4282 4283 if (c != '.') { 4284 /* 4285 * This is not ".." -- we can just store the 4286 * "/." and this character and continue 4287 * processing. 4288 */ 4289 dest[j++] = '/'; 4290 dest[j++] = '.'; 4291 dest[j++] = c; 4292 continue; 4293 } 4294 4295 c = dtrace_load8(src + i++); 4296 4297 if (c != '/' && c != '\0') { 4298 /* 4299 * This is not ".." -- it's "..[mumble]". 4300 * We'll store the "/.." and this character 4301 * and continue processing. 4302 */ 4303 dest[j++] = '/'; 4304 dest[j++] = '.'; 4305 dest[j++] = '.'; 4306 dest[j++] = c; 4307 continue; 4308 } 4309 4310 /* 4311 * This is "/../" or "/..\0". We need to back up 4312 * our destination pointer until we find a "/". 
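 * (For example, an input of "/a/b/../c" has "/a/b" in the destination
 * when the "/.." is encountered; backing up to the previous '/' and
 * continuing yields "/a/c".  The processing is purely lexical -- no
 * attempt is made to consult the file system or to resolve symlinks.)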
4313 */ 4314 i--; 4315 while (j != 0 && dest[--j] != '/') 4316 continue; 4317 4318 if (c == '\0') 4319 dest[++j] = '/'; 4320 } while (c != '\0'); 4321 4322 dest[j] = '\0'; 4323 regs[rd] = (uintptr_t)dest; 4324 mstate->dtms_scratch_ptr += size; 4325 break; 4326 } 4327 4328 case DIF_SUBR_INET_NTOA: 4329 case DIF_SUBR_INET_NTOA6: 4330 case DIF_SUBR_INET_NTOP: { 4331 size_t size; 4332 int af, argi, i; 4333 char *base, *end; 4334 4335 if (subr == DIF_SUBR_INET_NTOP) { 4336 af = (int)tupregs[0].dttk_value; 4337 argi = 1; 4338 } else { 4339 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4340 argi = 0; 4341 } 4342 4343 if (af == AF_INET) { 4344 ipaddr_t ip4; 4345 uint8_t *ptr8, val; 4346 4347 /* 4348 * Safely load the IPv4 address. 4349 */ 4350 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4351 4352 /* 4353 * Check an IPv4 string will fit in scratch. 4354 */ 4355 size = INET_ADDRSTRLEN; 4356 if (!DTRACE_INSCRATCH(mstate, size)) { 4357 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4358 regs[rd] = NULL; 4359 break; 4360 } 4361 base = (char *)mstate->dtms_scratch_ptr; 4362 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4363 4364 /* 4365 * Stringify as a dotted decimal quad. 4366 */ 4367 *end-- = '\0'; 4368 ptr8 = (uint8_t *)&ip4; 4369 for (i = 3; i >= 0; i--) { 4370 val = ptr8[i]; 4371 4372 if (val == 0) { 4373 *end-- = '0'; 4374 } else { 4375 for (; val; val /= 10) { 4376 *end-- = '0' + (val % 10); 4377 } 4378 } 4379 4380 if (i > 0) 4381 *end-- = '.'; 4382 } 4383 ASSERT(end + 1 >= base); 4384 4385 } else if (af == AF_INET6) { 4386 struct in6_addr ip6; 4387 int firstzero, tryzero, numzero, v6end; 4388 uint16_t val; 4389 const char digits[] = "0123456789abcdef"; 4390 4391 /* 4392 * Stringify using RFC 1884 convention 2 - 16 bit 4393 * hexadecimal values with a zero-run compression. 4394 * Lower case hexadecimal digits are used. 4395 * eg, fe80::214:4fff:fe0b:76c8. 4396 * The IPv4 embedded form is returned for inet_ntop, 4397 * just the IPv4 string is returned for inet_ntoa6. 4398 */ 4399 4400 /* 4401 * Safely load the IPv6 address. 4402 */ 4403 dtrace_bcopy( 4404 (void *)(uintptr_t)tupregs[argi].dttk_value, 4405 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4406 4407 /* 4408 * Check an IPv6 string will fit in scratch. 4409 */ 4410 size = INET6_ADDRSTRLEN; 4411 if (!DTRACE_INSCRATCH(mstate, size)) { 4412 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4413 regs[rd] = NULL; 4414 break; 4415 } 4416 base = (char *)mstate->dtms_scratch_ptr; 4417 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4418 *end-- = '\0'; 4419 4420 /* 4421 * Find the longest run of 16 bit zero values 4422 * for the single allowed zero compression - "::". 4423 */ 4424 firstzero = -1; 4425 tryzero = -1; 4426 numzero = 1; 4427 for (i = 0; i < sizeof (struct in6_addr); i++) { 4428 if (ip6._S6_un._S6_u8[i] == 0 && 4429 tryzero == -1 && i % 2 == 0) { 4430 tryzero = i; 4431 continue; 4432 } 4433 4434 if (tryzero != -1 && 4435 (ip6._S6_un._S6_u8[i] != 0 || 4436 i == sizeof (struct in6_addr) - 1)) { 4437 4438 if (i - tryzero <= numzero) { 4439 tryzero = -1; 4440 continue; 4441 } 4442 4443 firstzero = tryzero; 4444 numzero = i - i % 2 - tryzero; 4445 tryzero = -1; 4446 4447 if (ip6._S6_un._S6_u8[i] == 0 && 4448 i == sizeof (struct in6_addr) - 1) 4449 numzero += 2; 4450 } 4451 } 4452 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4453 4454 /* 4455 * Check for an IPv4 embedded address. 
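 * (In a v4-mapped or v4-compatible address -- e.g. ::ffff:10.1.2.3 --
 * the IPv4 address occupies the final four bytes, beginning at
 * DTRACE_V4MAPPED_OFFSET.  Because the string is built right-to-left,
 * those bytes are emitted first as a dotted quad; v6end is then pulled
 * back so that the hexadecimal groups resume just ahead of them.)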
4456 */ 4457 v6end = sizeof (struct in6_addr) - 2; 4458 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4459 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4460 for (i = sizeof (struct in6_addr) - 1; 4461 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4462 ASSERT(end >= base); 4463 4464 val = ip6._S6_un._S6_u8[i]; 4465 4466 if (val == 0) { 4467 *end-- = '0'; 4468 } else { 4469 for (; val; val /= 10) { 4470 *end-- = '0' + val % 10; 4471 } 4472 } 4473 4474 if (i > DTRACE_V4MAPPED_OFFSET) 4475 *end-- = '.'; 4476 } 4477 4478 if (subr == DIF_SUBR_INET_NTOA6) 4479 goto inetout; 4480 4481 /* 4482 * Set v6end to skip the IPv4 address that 4483 * we have already stringified. 4484 */ 4485 v6end = 10; 4486 } 4487 4488 /* 4489 * Build the IPv6 string by working through the 4490 * address in reverse. 4491 */ 4492 for (i = v6end; i >= 0; i -= 2) { 4493 ASSERT(end >= base); 4494 4495 if (i == firstzero + numzero - 2) { 4496 *end-- = ':'; 4497 *end-- = ':'; 4498 i -= numzero - 2; 4499 continue; 4500 } 4501 4502 if (i < 14 && i != firstzero - 2) 4503 *end-- = ':'; 4504 4505 val = (ip6._S6_un._S6_u8[i] << 8) + 4506 ip6._S6_un._S6_u8[i + 1]; 4507 4508 if (val == 0) { 4509 *end-- = '0'; 4510 } else { 4511 for (; val; val /= 16) { 4512 *end-- = digits[val % 16]; 4513 } 4514 } 4515 } 4516 ASSERT(end + 1 >= base); 4517 4518 } else { 4519 /* 4520 * The user didn't use AF_INET or AF_INET6. 4521 */ 4522 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4523 regs[rd] = NULL; 4524 break; 4525 } 4526 4527 inetout: regs[rd] = (uintptr_t)end + 1; 4528 mstate->dtms_scratch_ptr += size; 4529 break; 4530 } 4531 4532 } 4533 } 4534 4535 /* 4536 * Emulate the execution of DTrace IR instructions specified by the given 4537 * DIF object. This function is deliberately void of assertions as all of 4538 * the necessary checks are handled by a call to dtrace_difo_validate(). 4539 */ 4540 static uint64_t 4541 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4542 dtrace_vstate_t *vstate, dtrace_state_t *state) 4543 { 4544 const dif_instr_t *text = difo->dtdo_buf; 4545 const uint_t textlen = difo->dtdo_len; 4546 const char *strtab = difo->dtdo_strtab; 4547 const uint64_t *inttab = difo->dtdo_inttab; 4548 4549 uint64_t rval = 0; 4550 dtrace_statvar_t *svar; 4551 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4552 dtrace_difv_t *v; 4553 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4554 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4555 4556 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4557 uint64_t regs[DIF_DIR_NREGS]; 4558 uint64_t *tmp; 4559 4560 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4561 int64_t cc_r; 4562 uint_t pc = 0, id, opc; 4563 uint8_t ttop = 0; 4564 dif_instr_t instr; 4565 uint_t r1, r2, rd; 4566 4567 /* 4568 * We stash the current DIF object into the machine state: we need it 4569 * for subsequent access checking.
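 *
 * A brief orientation to the emulator that follows may be helpful:
 * regs[] holds the DIF integer registers (with %r0 hardwired to
 * zero), tupregs[] and ttop form a small stack of tuple components
 * used for associative array keys and subroutine arguments, and the
 * cc_* variables model processor-style condition codes -- set by the
 * cmp and tst operations and consumed by the conditional branches.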
4570 */ 4571 mstate->dtms_difo = difo; 4572 4573 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4574 4575 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4576 opc = pc; 4577 4578 instr = text[pc++]; 4579 r1 = DIF_INSTR_R1(instr); 4580 r2 = DIF_INSTR_R2(instr); 4581 rd = DIF_INSTR_RD(instr); 4582 4583 switch (DIF_INSTR_OP(instr)) { 4584 case DIF_OP_OR: 4585 regs[rd] = regs[r1] | regs[r2]; 4586 break; 4587 case DIF_OP_XOR: 4588 regs[rd] = regs[r1] ^ regs[r2]; 4589 break; 4590 case DIF_OP_AND: 4591 regs[rd] = regs[r1] & regs[r2]; 4592 break; 4593 case DIF_OP_SLL: 4594 regs[rd] = regs[r1] << regs[r2]; 4595 break; 4596 case DIF_OP_SRL: 4597 regs[rd] = regs[r1] >> regs[r2]; 4598 break; 4599 case DIF_OP_SUB: 4600 regs[rd] = regs[r1] - regs[r2]; 4601 break; 4602 case DIF_OP_ADD: 4603 regs[rd] = regs[r1] + regs[r2]; 4604 break; 4605 case DIF_OP_MUL: 4606 regs[rd] = regs[r1] * regs[r2]; 4607 break; 4608 case DIF_OP_SDIV: 4609 if (regs[r2] == 0) { 4610 regs[rd] = 0; 4611 *flags |= CPU_DTRACE_DIVZERO; 4612 } else { 4613 regs[rd] = (int64_t)regs[r1] / 4614 (int64_t)regs[r2]; 4615 } 4616 break; 4617 4618 case DIF_OP_UDIV: 4619 if (regs[r2] == 0) { 4620 regs[rd] = 0; 4621 *flags |= CPU_DTRACE_DIVZERO; 4622 } else { 4623 regs[rd] = regs[r1] / regs[r2]; 4624 } 4625 break; 4626 4627 case DIF_OP_SREM: 4628 if (regs[r2] == 0) { 4629 regs[rd] = 0; 4630 *flags |= CPU_DTRACE_DIVZERO; 4631 } else { 4632 regs[rd] = (int64_t)regs[r1] % 4633 (int64_t)regs[r2]; 4634 } 4635 break; 4636 4637 case DIF_OP_UREM: 4638 if (regs[r2] == 0) { 4639 regs[rd] = 0; 4640 *flags |= CPU_DTRACE_DIVZERO; 4641 } else { 4642 regs[rd] = regs[r1] % regs[r2]; 4643 } 4644 break; 4645 4646 case DIF_OP_NOT: 4647 regs[rd] = ~regs[r1]; 4648 break; 4649 case DIF_OP_MOV: 4650 regs[rd] = regs[r1]; 4651 break; 4652 case DIF_OP_CMP: 4653 cc_r = regs[r1] - regs[r2]; 4654 cc_n = cc_r < 0; 4655 cc_z = cc_r == 0; 4656 cc_v = 0; 4657 cc_c = regs[r1] < regs[r2]; 4658 break; 4659 case DIF_OP_TST: 4660 cc_n = cc_v = cc_c = 0; 4661 cc_z = regs[r1] == 0; 4662 break; 4663 case DIF_OP_BA: 4664 pc = DIF_INSTR_LABEL(instr); 4665 break; 4666 case DIF_OP_BE: 4667 if (cc_z) 4668 pc = DIF_INSTR_LABEL(instr); 4669 break; 4670 case DIF_OP_BNE: 4671 if (cc_z == 0) 4672 pc = DIF_INSTR_LABEL(instr); 4673 break; 4674 case DIF_OP_BG: 4675 if ((cc_z | (cc_n ^ cc_v)) == 0) 4676 pc = DIF_INSTR_LABEL(instr); 4677 break; 4678 case DIF_OP_BGU: 4679 if ((cc_c | cc_z) == 0) 4680 pc = DIF_INSTR_LABEL(instr); 4681 break; 4682 case DIF_OP_BGE: 4683 if ((cc_n ^ cc_v) == 0) 4684 pc = DIF_INSTR_LABEL(instr); 4685 break; 4686 case DIF_OP_BGEU: 4687 if (cc_c == 0) 4688 pc = DIF_INSTR_LABEL(instr); 4689 break; 4690 case DIF_OP_BL: 4691 if (cc_n ^ cc_v) 4692 pc = DIF_INSTR_LABEL(instr); 4693 break; 4694 case DIF_OP_BLU: 4695 if (cc_c) 4696 pc = DIF_INSTR_LABEL(instr); 4697 break; 4698 case DIF_OP_BLE: 4699 if (cc_z | (cc_n ^ cc_v)) 4700 pc = DIF_INSTR_LABEL(instr); 4701 break; 4702 case DIF_OP_BLEU: 4703 if (cc_c | cc_z) 4704 pc = DIF_INSTR_LABEL(instr); 4705 break; 4706 case DIF_OP_RLDSB: 4707 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4708 *flags |= CPU_DTRACE_KPRIV; 4709 *illval = regs[r1]; 4710 break; 4711 } 4712 /*FALLTHROUGH*/ 4713 case DIF_OP_LDSB: 4714 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4715 break; 4716 case DIF_OP_RLDSH: 4717 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4718 *flags |= CPU_DTRACE_KPRIV; 4719 *illval = regs[r1]; 4720 break; 4721 } 4722 /*FALLTHROUGH*/ 4723 case DIF_OP_LDSH: 4724 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4725 break; 
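		/*
		 * As with DIF_OP_RLDSB and DIF_OP_RLDSH above, the remaining
		 * DIF_OP_RLD* cases are "restricted" loads:  the address must
		 * first pass dtrace_canstore() -- that is, it must refer to
		 * memory that DTrace itself owns, such as scratch space or
		 * dynamic variable storage.  If it doesn't, the load is
		 * refused and CPU_DTRACE_KPRIV is raised; otherwise the case
		 * falls through to the corresponding unrestricted load.
		 */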
4726 case DIF_OP_RLDSW: 4727 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4728 *flags |= CPU_DTRACE_KPRIV; 4729 *illval = regs[r1]; 4730 break; 4731 } 4732 /*FALLTHROUGH*/ 4733 case DIF_OP_LDSW: 4734 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4735 break; 4736 case DIF_OP_RLDUB: 4737 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4738 *flags |= CPU_DTRACE_KPRIV; 4739 *illval = regs[r1]; 4740 break; 4741 } 4742 /*FALLTHROUGH*/ 4743 case DIF_OP_LDUB: 4744 regs[rd] = dtrace_load8(regs[r1]); 4745 break; 4746 case DIF_OP_RLDUH: 4747 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4748 *flags |= CPU_DTRACE_KPRIV; 4749 *illval = regs[r1]; 4750 break; 4751 } 4752 /*FALLTHROUGH*/ 4753 case DIF_OP_LDUH: 4754 regs[rd] = dtrace_load16(regs[r1]); 4755 break; 4756 case DIF_OP_RLDUW: 4757 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4758 *flags |= CPU_DTRACE_KPRIV; 4759 *illval = regs[r1]; 4760 break; 4761 } 4762 /*FALLTHROUGH*/ 4763 case DIF_OP_LDUW: 4764 regs[rd] = dtrace_load32(regs[r1]); 4765 break; 4766 case DIF_OP_RLDX: 4767 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4768 *flags |= CPU_DTRACE_KPRIV; 4769 *illval = regs[r1]; 4770 break; 4771 } 4772 /*FALLTHROUGH*/ 4773 case DIF_OP_LDX: 4774 regs[rd] = dtrace_load64(regs[r1]); 4775 break; 4776 case DIF_OP_ULDSB: 4777 regs[rd] = (int8_t) 4778 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4779 break; 4780 case DIF_OP_ULDSH: 4781 regs[rd] = (int16_t) 4782 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4783 break; 4784 case DIF_OP_ULDSW: 4785 regs[rd] = (int32_t) 4786 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4787 break; 4788 case DIF_OP_ULDUB: 4789 regs[rd] = 4790 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4791 break; 4792 case DIF_OP_ULDUH: 4793 regs[rd] = 4794 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4795 break; 4796 case DIF_OP_ULDUW: 4797 regs[rd] = 4798 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4799 break; 4800 case DIF_OP_ULDX: 4801 regs[rd] = 4802 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4803 break; 4804 case DIF_OP_RET: 4805 rval = regs[rd]; 4806 pc = textlen; 4807 break; 4808 case DIF_OP_NOP: 4809 break; 4810 case DIF_OP_SETX: 4811 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4812 break; 4813 case DIF_OP_SETS: 4814 regs[rd] = (uint64_t)(uintptr_t) 4815 (strtab + DIF_INSTR_STRING(instr)); 4816 break; 4817 case DIF_OP_SCMP: { 4818 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 4819 uintptr_t s1 = regs[r1]; 4820 uintptr_t s2 = regs[r2]; 4821 4822 if (s1 != NULL && 4823 !dtrace_strcanload(s1, sz, mstate, vstate)) 4824 break; 4825 if (s2 != NULL && 4826 !dtrace_strcanload(s2, sz, mstate, vstate)) 4827 break; 4828 4829 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 4830 4831 cc_n = cc_r < 0; 4832 cc_z = cc_r == 0; 4833 cc_v = cc_c = 0; 4834 break; 4835 } 4836 case DIF_OP_LDGA: 4837 regs[rd] = dtrace_dif_variable(mstate, state, 4838 r1, regs[r2]); 4839 break; 4840 case DIF_OP_LDGS: 4841 id = DIF_INSTR_VAR(instr); 4842 4843 if (id >= DIF_VAR_OTHER_UBASE) { 4844 uintptr_t a; 4845 4846 id -= DIF_VAR_OTHER_UBASE; 4847 svar = vstate->dtvs_globals[id]; 4848 ASSERT(svar != NULL); 4849 v = &svar->dtsv_var; 4850 4851 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4852 regs[rd] = svar->dtsv_data; 4853 break; 4854 } 4855 4856 a = (uintptr_t)svar->dtsv_data; 4857 4858 if (*(uint8_t *)a == UINT8_MAX) { 4859 /* 4860 * If the 0th byte is set to UINT8_MAX 4861 * then this is to be treated as a 4862 * reference to a NULL variable. 
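 * (The backing store for a by-ref global is, roughly, a
 * flag word followed by the payload:
 *
 *	+-------------------+-----------------------------+
 *	| flag word; byte 0 | variable payload            |
 *	| is UINT8_MAX if   | (dtdv_type.dtdt_size bytes) |
 *	| the variable is   |                             |
 *	| NULL              |                             |
 *	+-------------------+-----------------------------+
 *
 * which is why a non-NULL load returns the address one
 * uint64_t past the start of the allocation.)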
4863 */ 4864 regs[rd] = NULL; 4865 } else { 4866 regs[rd] = a + sizeof (uint64_t); 4867 } 4868 4869 break; 4870 } 4871 4872 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4873 break; 4874 4875 case DIF_OP_STGS: 4876 id = DIF_INSTR_VAR(instr); 4877 4878 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4879 id -= DIF_VAR_OTHER_UBASE; 4880 4881 svar = vstate->dtvs_globals[id]; 4882 ASSERT(svar != NULL); 4883 v = &svar->dtsv_var; 4884 4885 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4886 uintptr_t a = (uintptr_t)svar->dtsv_data; 4887 4888 ASSERT(a != NULL); 4889 ASSERT(svar->dtsv_size != 0); 4890 4891 if (regs[rd] == NULL) { 4892 *(uint8_t *)a = UINT8_MAX; 4893 break; 4894 } else { 4895 *(uint8_t *)a = 0; 4896 a += sizeof (uint64_t); 4897 } 4898 if (!dtrace_vcanload( 4899 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4900 mstate, vstate)) 4901 break; 4902 4903 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4904 (void *)a, &v->dtdv_type); 4905 break; 4906 } 4907 4908 svar->dtsv_data = regs[rd]; 4909 break; 4910 4911 case DIF_OP_LDTA: 4912 /* 4913 * There are no DTrace built-in thread-local arrays at 4914 * present. This opcode is saved for future work. 4915 */ 4916 *flags |= CPU_DTRACE_ILLOP; 4917 regs[rd] = 0; 4918 break; 4919 4920 case DIF_OP_LDLS: 4921 id = DIF_INSTR_VAR(instr); 4922 4923 if (id < DIF_VAR_OTHER_UBASE) { 4924 /* 4925 * For now, this has no meaning. 4926 */ 4927 regs[rd] = 0; 4928 break; 4929 } 4930 4931 id -= DIF_VAR_OTHER_UBASE; 4932 4933 ASSERT(id < vstate->dtvs_nlocals); 4934 ASSERT(vstate->dtvs_locals != NULL); 4935 4936 svar = vstate->dtvs_locals[id]; 4937 ASSERT(svar != NULL); 4938 v = &svar->dtsv_var; 4939 4940 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4941 uintptr_t a = (uintptr_t)svar->dtsv_data; 4942 size_t sz = v->dtdv_type.dtdt_size; 4943 4944 sz += sizeof (uint64_t); 4945 ASSERT(svar->dtsv_size == NCPU * sz); 4946 a += CPU->cpu_id * sz; 4947 4948 if (*(uint8_t *)a == UINT8_MAX) { 4949 /* 4950 * If the 0th byte is set to UINT8_MAX 4951 * then this is to be treated as a 4952 * reference to a NULL variable. 
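 * (Local variables use the same flag-word-plus-payload
 * layout as the by-ref globals above, except that
 * dtsv_data points to NCPU consecutive per-CPU slots;
 * the "a += CPU->cpu_id * sz" above selects this CPU's
 * slot, which is what lets locals be read and written
 * without any locking.)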
4953 */ 4954 regs[rd] = NULL; 4955 } else { 4956 regs[rd] = a + sizeof (uint64_t); 4957 } 4958 4959 break; 4960 } 4961 4962 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4963 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4964 regs[rd] = tmp[CPU->cpu_id]; 4965 break; 4966 4967 case DIF_OP_STLS: 4968 id = DIF_INSTR_VAR(instr); 4969 4970 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4971 id -= DIF_VAR_OTHER_UBASE; 4972 ASSERT(id < vstate->dtvs_nlocals); 4973 4974 ASSERT(vstate->dtvs_locals != NULL); 4975 svar = vstate->dtvs_locals[id]; 4976 ASSERT(svar != NULL); 4977 v = &svar->dtsv_var; 4978 4979 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4980 uintptr_t a = (uintptr_t)svar->dtsv_data; 4981 size_t sz = v->dtdv_type.dtdt_size; 4982 4983 sz += sizeof (uint64_t); 4984 ASSERT(svar->dtsv_size == NCPU * sz); 4985 a += CPU->cpu_id * sz; 4986 4987 if (regs[rd] == NULL) { 4988 *(uint8_t *)a = UINT8_MAX; 4989 break; 4990 } else { 4991 *(uint8_t *)a = 0; 4992 a += sizeof (uint64_t); 4993 } 4994 4995 if (!dtrace_vcanload( 4996 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4997 mstate, vstate)) 4998 break; 4999 5000 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5001 (void *)a, &v->dtdv_type); 5002 break; 5003 } 5004 5005 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5006 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5007 tmp[CPU->cpu_id] = regs[rd]; 5008 break; 5009 5010 case DIF_OP_LDTS: { 5011 dtrace_dynvar_t *dvar; 5012 dtrace_key_t *key; 5013 5014 id = DIF_INSTR_VAR(instr); 5015 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5016 id -= DIF_VAR_OTHER_UBASE; 5017 v = &vstate->dtvs_tlocals[id]; 5018 5019 key = &tupregs[DIF_DTR_NREGS]; 5020 key[0].dttk_value = (uint64_t)id; 5021 key[0].dttk_size = 0; 5022 DTRACE_TLS_THRKEY(key[1].dttk_value); 5023 key[1].dttk_size = 0; 5024 5025 dvar = dtrace_dynvar(dstate, 2, key, 5026 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5027 mstate, vstate); 5028 5029 if (dvar == NULL) { 5030 regs[rd] = 0; 5031 break; 5032 } 5033 5034 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5035 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5036 } else { 5037 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5038 } 5039 5040 break; 5041 } 5042 5043 case DIF_OP_STTS: { 5044 dtrace_dynvar_t *dvar; 5045 dtrace_key_t *key; 5046 5047 id = DIF_INSTR_VAR(instr); 5048 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5049 id -= DIF_VAR_OTHER_UBASE; 5050 5051 key = &tupregs[DIF_DTR_NREGS]; 5052 key[0].dttk_value = (uint64_t)id; 5053 key[0].dttk_size = 0; 5054 DTRACE_TLS_THRKEY(key[1].dttk_value); 5055 key[1].dttk_size = 0; 5056 v = &vstate->dtvs_tlocals[id]; 5057 5058 dvar = dtrace_dynvar(dstate, 2, key, 5059 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5060 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5061 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5062 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5063 5064 /* 5065 * Given that we're storing to thread-local data, 5066 * we need to flush our predicate cache. 
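 * (Recall that a thread caches, in t_predcache, the identifier
 * of a predicate that is known to evaluate to false for that
 * thread, allowing dtrace_probe() to skip the ECB entirely.  A
 * predicate may well depend on the thread-local variable we're
 * about to store, so any such cached result must be considered
 * stale.)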
5067 */ 5068 curthread->t_predcache = NULL; 5069 5070 if (dvar == NULL) 5071 break; 5072 5073 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5074 if (!dtrace_vcanload( 5075 (void *)(uintptr_t)regs[rd], 5076 &v->dtdv_type, mstate, vstate)) 5077 break; 5078 5079 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5080 dvar->dtdv_data, &v->dtdv_type); 5081 } else { 5082 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5083 } 5084 5085 break; 5086 } 5087 5088 case DIF_OP_SRA: 5089 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5090 break; 5091 5092 case DIF_OP_CALL: 5093 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5094 regs, tupregs, ttop, mstate, state); 5095 break; 5096 5097 case DIF_OP_PUSHTR: 5098 if (ttop == DIF_DTR_NREGS) { 5099 *flags |= CPU_DTRACE_TUPOFLOW; 5100 break; 5101 } 5102 5103 if (r1 == DIF_TYPE_STRING) { 5104 /* 5105 * If this is a string type and the size is 0, 5106 * we'll use the system-wide default string 5107 * size. Note that we are _not_ looking at 5108 * the value of the DTRACEOPT_STRSIZE option; 5109 * had this been set, we would expect to have 5110 * a non-zero size value in the "pushtr". 5111 */ 5112 tupregs[ttop].dttk_size = 5113 dtrace_strlen((char *)(uintptr_t)regs[rd], 5114 regs[r2] ? regs[r2] : 5115 dtrace_strsize_default) + 1; 5116 } else { 5117 tupregs[ttop].dttk_size = regs[r2]; 5118 } 5119 5120 tupregs[ttop++].dttk_value = regs[rd]; 5121 break; 5122 5123 case DIF_OP_PUSHTV: 5124 if (ttop == DIF_DTR_NREGS) { 5125 *flags |= CPU_DTRACE_TUPOFLOW; 5126 break; 5127 } 5128 5129 tupregs[ttop].dttk_value = regs[rd]; 5130 tupregs[ttop++].dttk_size = 0; 5131 break; 5132 5133 case DIF_OP_POPTS: 5134 if (ttop != 0) 5135 ttop--; 5136 break; 5137 5138 case DIF_OP_FLUSHTS: 5139 ttop = 0; 5140 break; 5141 5142 case DIF_OP_LDGAA: 5143 case DIF_OP_LDTAA: { 5144 dtrace_dynvar_t *dvar; 5145 dtrace_key_t *key = tupregs; 5146 uint_t nkeys = ttop; 5147 5148 id = DIF_INSTR_VAR(instr); 5149 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5150 id -= DIF_VAR_OTHER_UBASE; 5151 5152 key[nkeys].dttk_value = (uint64_t)id; 5153 key[nkeys++].dttk_size = 0; 5154 5155 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5156 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5157 key[nkeys++].dttk_size = 0; 5158 v = &vstate->dtvs_tlocals[id]; 5159 } else { 5160 v = &vstate->dtvs_globals[id]->dtsv_var; 5161 } 5162 5163 dvar = dtrace_dynvar(dstate, nkeys, key, 5164 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5165 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5166 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5167 5168 if (dvar == NULL) { 5169 regs[rd] = 0; 5170 break; 5171 } 5172 5173 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5174 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5175 } else { 5176 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5177 } 5178 5179 break; 5180 } 5181 5182 case DIF_OP_STGAA: 5183 case DIF_OP_STTAA: { 5184 dtrace_dynvar_t *dvar; 5185 dtrace_key_t *key = tupregs; 5186 uint_t nkeys = ttop; 5187 5188 id = DIF_INSTR_VAR(instr); 5189 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5190 id -= DIF_VAR_OTHER_UBASE; 5191 5192 key[nkeys].dttk_value = (uint64_t)id; 5193 key[nkeys++].dttk_size = 0; 5194 5195 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5196 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5197 key[nkeys++].dttk_size = 0; 5198 v = &vstate->dtvs_tlocals[id]; 5199 } else { 5200 v = &vstate->dtvs_globals[id]->dtsv_var; 5201 } 5202 5203 dvar = dtrace_dynvar(dstate, nkeys, key, 5204 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5205 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5206 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5207 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5208 5209 if (dvar == NULL) 5210 break; 5211 5212 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5213 if (!dtrace_vcanload( 5214 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5215 mstate, vstate)) 5216 break; 5217 5218 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5219 dvar->dtdv_data, &v->dtdv_type); 5220 } else { 5221 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5222 } 5223 5224 break; 5225 } 5226 5227 case DIF_OP_ALLOCS: { 5228 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5229 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5230 5231 /* 5232 * Rounding up the user allocation size could have 5233 * overflowed large, bogus allocations (like -1ULL) to 5234 * 0. 5235 */ 5236 if (size < regs[r1] || 5237 !DTRACE_INSCRATCH(mstate, size)) { 5238 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5239 regs[rd] = NULL; 5240 break; 5241 } 5242 5243 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5244 mstate->dtms_scratch_ptr += size; 5245 regs[rd] = ptr; 5246 break; 5247 } 5248 5249 case DIF_OP_COPYS: 5250 if (!dtrace_canstore(regs[rd], regs[r2], 5251 mstate, vstate)) { 5252 *flags |= CPU_DTRACE_BADADDR; 5253 *illval = regs[rd]; 5254 break; 5255 } 5256 5257 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5258 break; 5259 5260 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5261 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5262 break; 5263 5264 case DIF_OP_STB: 5265 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5266 *flags |= CPU_DTRACE_BADADDR; 5267 *illval = regs[rd]; 5268 break; 5269 } 5270 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5271 break; 5272 5273 case DIF_OP_STH: 5274 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5275 *flags |= CPU_DTRACE_BADADDR; 5276 *illval = regs[rd]; 5277 break; 5278 } 5279 if (regs[rd] & 1) { 5280 *flags |= CPU_DTRACE_BADALIGN; 5281 *illval = regs[rd]; 5282 break; 5283 } 5284 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5285 break; 5286 5287 case DIF_OP_STW: 5288 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5289 *flags |= CPU_DTRACE_BADADDR; 5290 *illval = regs[rd]; 5291 break; 5292 } 5293 if (regs[rd] & 3) { 5294 *flags |= CPU_DTRACE_BADALIGN; 5295 *illval = regs[rd]; 5296 break; 5297 } 5298 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5299 break; 5300 5301 case DIF_OP_STX: 5302 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5303 *flags |= CPU_DTRACE_BADADDR; 5304 *illval = regs[rd]; 5305 break; 5306 } 5307 if (regs[rd] & 7) { 5308 *flags |= CPU_DTRACE_BADALIGN; 5309 *illval = regs[rd]; 5310 break; 5311 } 5312 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5313 break; 5314 } 5315 } 5316 5317 if (!(*flags & CPU_DTRACE_FAULT)) 5318 return (rval); 5319 5320 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5321 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5322 5323 return (0); 5324 } 5325 5326 static void 5327 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5328 { 5329 dtrace_probe_t *probe = ecb->dte_probe; 5330 dtrace_provider_t *prov = probe->dtpr_provider; 5331 char c[DTRACE_FULLNAMELEN + 80], *str; 5332 char *msg = "dtrace: breakpoint action at probe "; 5333 char *ecbmsg = " (ecb "; 5334 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5335 uintptr_t val = (uintptr_t)ecb; 5336 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5337 5338 if (dtrace_destructive_disallow) 5339 return; 5340 5341 /* 5342 * It's impossible to be taking action on the NULL probe. 
5343 */ 5344 ASSERT(probe != NULL); 5345 5346 /* 5347 * This is a poor man's (destitute man's?) sprintf(): we want to 5348 * print the provider name, module name, function name and name of 5349 * the probe, along with the hex address of the ECB with the breakpoint 5350 * action -- all of which we must place in the character buffer by 5351 * hand. 5352 */ 5353 while (*msg != '\0') 5354 c[i++] = *msg++; 5355 5356 for (str = prov->dtpv_name; *str != '\0'; str++) 5357 c[i++] = *str; 5358 c[i++] = ':'; 5359 5360 for (str = probe->dtpr_mod; *str != '\0'; str++) 5361 c[i++] = *str; 5362 c[i++] = ':'; 5363 5364 for (str = probe->dtpr_func; *str != '\0'; str++) 5365 c[i++] = *str; 5366 c[i++] = ':'; 5367 5368 for (str = probe->dtpr_name; *str != '\0'; str++) 5369 c[i++] = *str; 5370 5371 while (*ecbmsg != '\0') 5372 c[i++] = *ecbmsg++; 5373 5374 while (shift >= 0) { 5375 mask = (uintptr_t)0xf << shift; 5376 5377 if (val >= ((uintptr_t)1 << shift)) 5378 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5379 shift -= 4; 5380 } 5381 5382 c[i++] = ')'; 5383 c[i] = '\0'; 5384 5385 debug_enter(c); 5386 } 5387 5388 static void 5389 dtrace_action_panic(dtrace_ecb_t *ecb) 5390 { 5391 dtrace_probe_t *probe = ecb->dte_probe; 5392 5393 /* 5394 * It's impossible to be taking action on the NULL probe. 5395 */ 5396 ASSERT(probe != NULL); 5397 5398 if (dtrace_destructive_disallow) 5399 return; 5400 5401 if (dtrace_panicked != NULL) 5402 return; 5403 5404 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5405 return; 5406 5407 /* 5408 * We won the right to panic. (We want to be sure that only one 5409 * thread calls panic() from dtrace_probe(), and that panic() is 5410 * called exactly once.) 5411 */ 5412 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5413 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5414 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5415 } 5416 5417 static void 5418 dtrace_action_raise(uint64_t sig) 5419 { 5420 if (dtrace_destructive_disallow) 5421 return; 5422 5423 if (sig >= NSIG) { 5424 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5425 return; 5426 } 5427 5428 /* 5429 * raise() has a queue depth of 1 -- we ignore all subsequent 5430 * invocations of the raise() action. 5431 */ 5432 if (curthread->t_dtrace_sig == 0) 5433 curthread->t_dtrace_sig = (uint8_t)sig; 5434 5435 curthread->t_sig_check = 1; 5436 aston(curthread); 5437 } 5438 5439 static void 5440 dtrace_action_stop(void) 5441 { 5442 if (dtrace_destructive_disallow) 5443 return; 5444 5445 if (!curthread->t_dtrace_stop) { 5446 curthread->t_dtrace_stop = 1; 5447 curthread->t_sig_check = 1; 5448 aston(curthread); 5449 } 5450 } 5451 5452 static void 5453 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5454 { 5455 hrtime_t now; 5456 volatile uint16_t *flags; 5457 cpu_t *cpu = CPU; 5458 5459 if (dtrace_destructive_disallow) 5460 return; 5461 5462 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5463 5464 now = dtrace_gethrtime(); 5465 5466 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5467 /* 5468 * We need to advance the mark to the current time. 5469 */ 5470 cpu->cpu_dtrace_chillmark = now; 5471 cpu->cpu_dtrace_chilled = 0; 5472 } 5473 5474 /* 5475 * Now check to see if the requested chill time would take us over 5476 * the maximum amount of time allowed in the chill interval. (Or 5477 * worse, if the calculation itself induces overflow.) 
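 * That is, the total time spent chilling within any one
 * dtrace_chill_interval may not exceed dtrace_chill_max; the second
 * clause of the test below catches an addition that wraps around.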
5478 */ 5479 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5480 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5481 *flags |= CPU_DTRACE_ILLOP; 5482 return; 5483 } 5484 5485 while (dtrace_gethrtime() - now < val) 5486 continue; 5487 5488 /* 5489 * Normally, we assure that the value of the variable "timestamp" does 5490 * not change within an ECB. The presence of chill() represents an 5491 * exception to this rule, however. 5492 */ 5493 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5494 cpu->cpu_dtrace_chilled += val; 5495 } 5496 5497 static void 5498 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5499 uint64_t *buf, uint64_t arg) 5500 { 5501 int nframes = DTRACE_USTACK_NFRAMES(arg); 5502 int strsize = DTRACE_USTACK_STRSIZE(arg); 5503 uint64_t *pcs = &buf[1], *fps; 5504 char *str = (char *)&pcs[nframes]; 5505 int size, offs = 0, i, j; 5506 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5507 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5508 char *sym; 5509 5510 /* 5511 * Should be taking a faster path if string space has not been 5512 * allocated. 5513 */ 5514 ASSERT(strsize != 0); 5515 5516 /* 5517 * We will first allocate some temporary space for the frame pointers. 5518 */ 5519 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5520 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5521 (nframes * sizeof (uint64_t)); 5522 5523 if (!DTRACE_INSCRATCH(mstate, size)) { 5524 /* 5525 * Not enough room for our frame pointers -- need to indicate 5526 * that we ran out of scratch space. 5527 */ 5528 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5529 return; 5530 } 5531 5532 mstate->dtms_scratch_ptr += size; 5533 saved = mstate->dtms_scratch_ptr; 5534 5535 /* 5536 * Now get a stack with both program counters and frame pointers. 5537 */ 5538 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5539 dtrace_getufpstack(buf, fps, nframes + 1); 5540 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5541 5542 /* 5543 * If that faulted, we're cooked. 5544 */ 5545 if (*flags & CPU_DTRACE_FAULT) 5546 goto out; 5547 5548 /* 5549 * Now we want to walk up the stack, calling the USTACK helper. For 5550 * each iteration, we restore the scratch pointer. 5551 */ 5552 for (i = 0; i < nframes; i++) { 5553 mstate->dtms_scratch_ptr = saved; 5554 5555 if (offs >= strsize) 5556 break; 5557 5558 sym = (char *)(uintptr_t)dtrace_helper( 5559 DTRACE_HELPER_ACTION_USTACK, 5560 mstate, state, pcs[i], fps[i]); 5561 5562 /* 5563 * If we faulted while running the helper, we're going to 5564 * clear the fault and null out the corresponding string. 5565 */ 5566 if (*flags & CPU_DTRACE_FAULT) { 5567 *flags &= ~CPU_DTRACE_FAULT; 5568 str[offs++] = '\0'; 5569 continue; 5570 } 5571 5572 if (sym == NULL) { 5573 str[offs++] = '\0'; 5574 continue; 5575 } 5576 5577 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5578 5579 /* 5580 * Now copy in the string that the helper returned to us. 5581 */ 5582 for (j = 0; offs + j < strsize; j++) { 5583 if ((str[offs + j] = sym[j]) == '\0') 5584 break; 5585 } 5586 5587 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5588 5589 offs += j + 1; 5590 } 5591 5592 if (offs >= strsize) { 5593 /* 5594 * If we didn't have room for all of the strings, we don't 5595 * abort processing -- this needn't be a fatal error -- but we 5596 * still want to increment a counter (dts_stkstroverflows) to 5597 * allow this condition to be warned about. (If this is from 5598 * a jstack() action, it is easily tuned via jstackstrsize.) 
5599 */ 5600 dtrace_error(&state->dts_stkstroverflows); 5601 } 5602 5603 while (offs < strsize) 5604 str[offs++] = '\0'; 5605 5606 out: 5607 mstate->dtms_scratch_ptr = old; 5608 } 5609 5610 /* 5611 * If you're looking for the epicenter of DTrace, you just found it. This 5612 * is the function called by the provider to fire a probe -- from which all 5613 * subsequent probe-context DTrace activity emanates. 5614 */ 5615 void 5616 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5617 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5618 { 5619 processorid_t cpuid; 5620 dtrace_icookie_t cookie; 5621 dtrace_probe_t *probe; 5622 dtrace_mstate_t mstate; 5623 dtrace_ecb_t *ecb; 5624 dtrace_action_t *act; 5625 intptr_t offs; 5626 size_t size; 5627 int vtime, onintr; 5628 volatile uint16_t *flags; 5629 hrtime_t now; 5630 5631 /* 5632 * Kick out immediately if this CPU is still being born (in which case 5633 * curthread will be set to -1) or the current thread can't allow 5634 * probes in its current context. 5635 */ 5636 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5637 return; 5638 5639 cookie = dtrace_interrupt_disable(); 5640 probe = dtrace_probes[id - 1]; 5641 cpuid = CPU->cpu_id; 5642 onintr = CPU_ON_INTR(CPU); 5643 5644 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5645 probe->dtpr_predcache == curthread->t_predcache) { 5646 /* 5647 * We have hit in the predicate cache; we know that 5648 * this predicate would evaluate to be false. 5649 */ 5650 dtrace_interrupt_enable(cookie); 5651 return; 5652 } 5653 5654 if (panic_quiesce) { 5655 /* 5656 * We don't trace anything if we're panicking. 5657 */ 5658 dtrace_interrupt_enable(cookie); 5659 return; 5660 } 5661 5662 now = dtrace_gethrtime(); 5663 vtime = dtrace_vtime_references != 0; 5664 5665 if (vtime && curthread->t_dtrace_start) 5666 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5667 5668 mstate.dtms_difo = NULL; 5669 mstate.dtms_probe = probe; 5670 mstate.dtms_strtok = NULL; 5671 mstate.dtms_arg[0] = arg0; 5672 mstate.dtms_arg[1] = arg1; 5673 mstate.dtms_arg[2] = arg2; 5674 mstate.dtms_arg[3] = arg3; 5675 mstate.dtms_arg[4] = arg4; 5676 5677 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5678 5679 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5680 dtrace_predicate_t *pred = ecb->dte_predicate; 5681 dtrace_state_t *state = ecb->dte_state; 5682 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5683 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5684 dtrace_vstate_t *vstate = &state->dts_vstate; 5685 dtrace_provider_t *prov = probe->dtpr_provider; 5686 int committed = 0; 5687 caddr_t tomax; 5688 5689 /* 5690 * A little subtlety with the following (seemingly innocuous) 5691 * declaration of the automatic 'val': by looking at the 5692 * code, you might think that it could be declared in the 5693 * action processing loop, below. (That is, it's only used in 5694 * the action processing loop.) However, it must be declared 5695 * out of that scope because in the case of DIF expression 5696 * arguments to aggregating actions, one iteration of the 5697 * action loop will use the last iteration's value. 
5698 */ 5699 #ifdef lint 5700 uint64_t val = 0; 5701 #else 5702 uint64_t val; 5703 #endif 5704 5705 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5706 *flags &= ~CPU_DTRACE_ERROR; 5707 5708 if (prov == dtrace_provider) { 5709 /* 5710 * If dtrace itself is the provider of this probe, 5711 * we're only going to continue processing the ECB if 5712 * arg0 (the dtrace_state_t) is equal to the ECB's 5713 * creating state. (This prevents disjoint consumers 5714 * from seeing one another's metaprobes.) 5715 */ 5716 if (arg0 != (uint64_t)(uintptr_t)state) 5717 continue; 5718 } 5719 5720 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5721 /* 5722 * We're not currently active. If our provider isn't 5723 * the dtrace pseudo provider, we're not interested. 5724 */ 5725 if (prov != dtrace_provider) 5726 continue; 5727 5728 /* 5729 * Now we must further check if we are in the BEGIN 5730 * probe. If we are, we will only continue processing 5731 * if we're still in WARMUP -- if one BEGIN enabling 5732 * has invoked the exit() action, we don't want to 5733 * evaluate subsequent BEGIN enablings. 5734 */ 5735 if (probe->dtpr_id == dtrace_probeid_begin && 5736 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5737 ASSERT(state->dts_activity == 5738 DTRACE_ACTIVITY_DRAINING); 5739 continue; 5740 } 5741 } 5742 5743 if (ecb->dte_cond) { 5744 /* 5745 * If the dte_cond bits indicate that this 5746 * consumer is only allowed to see user-mode firings 5747 * of this probe, call the provider's dtps_usermode() 5748 * entry point to check that the probe was fired 5749 * while in a user context. Skip this ECB if that's 5750 * not the case. 5751 */ 5752 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5753 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5754 probe->dtpr_id, probe->dtpr_arg) == 0) 5755 continue; 5756 5757 /* 5758 * This is more subtle than it looks. We have to be 5759 * absolutely certain that CRED() isn't going to 5760 * change out from under us so it's only legit to 5761 * examine that structure if we're in constrained 5762 * situations. Currently, the only time we'll do this 5763 * check is if a non-super-user has enabled the 5764 * profile or syscall providers -- providers that 5765 * allow visibility of all processes. For the 5766 * profile case, the check above will ensure that 5767 * we're examining a user context. 5768 */ 5769 if (ecb->dte_cond & DTRACE_COND_OWNER) { 5770 cred_t *cr; 5771 cred_t *s_cr = 5772 ecb->dte_state->dts_cred.dcr_cred; 5773 proc_t *proc; 5774 5775 ASSERT(s_cr != NULL); 5776 5777 if ((cr = CRED()) == NULL || 5778 s_cr->cr_uid != cr->cr_uid || 5779 s_cr->cr_uid != cr->cr_ruid || 5780 s_cr->cr_uid != cr->cr_suid || 5781 s_cr->cr_gid != cr->cr_gid || 5782 s_cr->cr_gid != cr->cr_rgid || 5783 s_cr->cr_gid != cr->cr_sgid || 5784 (proc = ttoproc(curthread)) == NULL || 5785 (proc->p_flag & SNOCD)) 5786 continue; 5787 } 5788 5789 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 5790 cred_t *cr; 5791 cred_t *s_cr = 5792 ecb->dte_state->dts_cred.dcr_cred; 5793 5794 ASSERT(s_cr != NULL); 5795 5796 if ((cr = CRED()) == NULL || 5797 s_cr->cr_zone->zone_id != 5798 cr->cr_zone->zone_id) 5799 continue; 5800 } 5801 } 5802 5803 if (now - state->dts_alive > dtrace_deadman_timeout) { 5804 /* 5805 * We seem to be dead.
Unless we (a) have kernel 5806 * destructive permissions, (b) have explicitly enabled 5807 * destructive actions, and (c) destructive actions have 5808 * not been disabled, we're going to transition into 5809 * the KILLED state, from which no further processing 5810 * on this state will be performed. 5811 */ 5812 if (!dtrace_priv_kernel_destructive(state) || 5813 !state->dts_cred.dcr_destructive || 5814 dtrace_destructive_disallow) { 5815 void *activity = &state->dts_activity; 5816 dtrace_activity_t current; 5817 5818 do { 5819 current = state->dts_activity; 5820 } while (dtrace_cas32(activity, current, 5821 DTRACE_ACTIVITY_KILLED) != current); 5822 5823 continue; 5824 } 5825 } 5826 5827 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 5828 ecb->dte_alignment, state, &mstate)) < 0) 5829 continue; 5830 5831 tomax = buf->dtb_tomax; 5832 ASSERT(tomax != NULL); 5833 5834 if (ecb->dte_size != 0) 5835 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5836 5837 mstate.dtms_epid = ecb->dte_epid; 5838 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5839 5840 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 5841 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 5842 else 5843 mstate.dtms_access = 0; 5844 5845 if (pred != NULL) { 5846 dtrace_difo_t *dp = pred->dtp_difo; 5847 int rval; 5848 5849 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5850 5851 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5852 dtrace_cacheid_t cid = probe->dtpr_predcache; 5853 5854 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5855 /* 5856 * Update the predicate cache... 5857 */ 5858 ASSERT(cid == pred->dtp_cacheid); 5859 curthread->t_predcache = cid; 5860 } 5861 5862 continue; 5863 } 5864 } 5865 5866 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5867 act != NULL; act = act->dta_next) { 5868 size_t valoffs; 5869 dtrace_difo_t *dp; 5870 dtrace_recdesc_t *rec = &act->dta_rec; 5871 5872 size = rec->dtrd_size; 5873 valoffs = offs + rec->dtrd_offset; 5874 5875 if (DTRACEACT_ISAGG(act->dta_kind)) { 5876 uint64_t v = 0xbad; 5877 dtrace_aggregation_t *agg; 5878 5879 agg = (dtrace_aggregation_t *)act; 5880 5881 if ((dp = act->dta_difo) != NULL) 5882 v = dtrace_dif_emulate(dp, 5883 &mstate, vstate, state); 5884 5885 if (*flags & CPU_DTRACE_ERROR) 5886 continue; 5887 5888 /* 5889 * Note that we always pass the expression 5890 * value from the previous iteration of the 5891 * action loop. This value will only be used 5892 * if there is an expression argument to the 5893 * aggregating action, denoted by the 5894 * dtag_hasarg field. 5895 */ 5896 dtrace_aggregate(agg, buf, 5897 offs, aggbuf, v, val); 5898 continue; 5899 } 5900 5901 switch (act->dta_kind) { 5902 case DTRACEACT_STOP: 5903 if (dtrace_priv_proc_destructive(state)) 5904 dtrace_action_stop(); 5905 continue; 5906 5907 case DTRACEACT_BREAKPOINT: 5908 if (dtrace_priv_kernel_destructive(state)) 5909 dtrace_action_breakpoint(ecb); 5910 continue; 5911 5912 case DTRACEACT_PANIC: 5913 if (dtrace_priv_kernel_destructive(state)) 5914 dtrace_action_panic(ecb); 5915 continue; 5916 5917 case DTRACEACT_STACK: 5918 if (!dtrace_priv_kernel(state)) 5919 continue; 5920 5921 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5922 size / sizeof (pc_t), probe->dtpr_aframes, 5923 DTRACE_ANCHORED(probe) ? NULL : 5924 (uint32_t *)arg0); 5925 5926 continue; 5927 5928 case DTRACEACT_JSTACK: 5929 case DTRACEACT_USTACK: 5930 if (!dtrace_priv_proc(state)) 5931 continue; 5932 5933 /* 5934 * See comment in DIF_VAR_PID.
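 * Briefly:  if an anchored probe has fired while we're
 * on an interrupt stack, the interrupted thread's user
 * stack bears no relationship to the probe that fired,
 * so rather than record a misleading ustack() we simply
 * zero the record.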
5935 */ 5936 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5937 CPU_ON_INTR(CPU)) { 5938 int depth = DTRACE_USTACK_NFRAMES( 5939 rec->dtrd_arg) + 1; 5940 5941 dtrace_bzero((void *)(tomax + valoffs), 5942 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5943 + depth * sizeof (uint64_t)); 5944 5945 continue; 5946 } 5947 5948 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5949 curproc->p_dtrace_helpers != NULL) { 5950 /* 5951 * This is the slow path -- we have 5952 * allocated string space, and we're 5953 * getting the stack of a process that 5954 * has helpers. Call into a separate 5955 * routine to perform this processing. 5956 */ 5957 dtrace_action_ustack(&mstate, state, 5958 (uint64_t *)(tomax + valoffs), 5959 rec->dtrd_arg); 5960 continue; 5961 } 5962 5963 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5964 dtrace_getupcstack((uint64_t *) 5965 (tomax + valoffs), 5966 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5967 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5968 continue; 5969 5970 default: 5971 break; 5972 } 5973 5974 dp = act->dta_difo; 5975 ASSERT(dp != NULL); 5976 5977 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5978 5979 if (*flags & CPU_DTRACE_ERROR) 5980 continue; 5981 5982 switch (act->dta_kind) { 5983 case DTRACEACT_SPECULATE: 5984 ASSERT(buf == &state->dts_buffer[cpuid]); 5985 buf = dtrace_speculation_buffer(state, 5986 cpuid, val); 5987 5988 if (buf == NULL) { 5989 *flags |= CPU_DTRACE_DROP; 5990 continue; 5991 } 5992 5993 offs = dtrace_buffer_reserve(buf, 5994 ecb->dte_needed, ecb->dte_alignment, 5995 state, NULL); 5996 5997 if (offs < 0) { 5998 *flags |= CPU_DTRACE_DROP; 5999 continue; 6000 } 6001 6002 tomax = buf->dtb_tomax; 6003 ASSERT(tomax != NULL); 6004 6005 if (ecb->dte_size != 0) 6006 DTRACE_STORE(uint32_t, tomax, offs, 6007 ecb->dte_epid); 6008 continue; 6009 6010 case DTRACEACT_CHILL: 6011 if (dtrace_priv_kernel_destructive(state)) 6012 dtrace_action_chill(&mstate, val); 6013 continue; 6014 6015 case DTRACEACT_RAISE: 6016 if (dtrace_priv_proc_destructive(state)) 6017 dtrace_action_raise(val); 6018 continue; 6019 6020 case DTRACEACT_COMMIT: 6021 ASSERT(!committed); 6022 6023 /* 6024 * We need to commit our buffer state. 6025 */ 6026 if (ecb->dte_size) 6027 buf->dtb_offset = offs + ecb->dte_size; 6028 buf = &state->dts_buffer[cpuid]; 6029 dtrace_speculation_commit(state, cpuid, val); 6030 committed = 1; 6031 continue; 6032 6033 case DTRACEACT_DISCARD: 6034 dtrace_speculation_discard(state, cpuid, val); 6035 continue; 6036 6037 case DTRACEACT_DIFEXPR: 6038 case DTRACEACT_LIBACT: 6039 case DTRACEACT_PRINTF: 6040 case DTRACEACT_PRINTA: 6041 case DTRACEACT_SYSTEM: 6042 case DTRACEACT_FREOPEN: 6043 break; 6044 6045 case DTRACEACT_SYM: 6046 case DTRACEACT_MOD: 6047 if (!dtrace_priv_kernel(state)) 6048 continue; 6049 break; 6050 6051 case DTRACEACT_USYM: 6052 case DTRACEACT_UMOD: 6053 case DTRACEACT_UADDR: { 6054 struct pid *pid = curthread->t_procp->p_pidp; 6055 6056 if (!dtrace_priv_proc(state)) 6057 continue; 6058 6059 DTRACE_STORE(uint64_t, tomax, 6060 valoffs, (uint64_t)pid->pid_id); 6061 DTRACE_STORE(uint64_t, tomax, 6062 valoffs + sizeof (uint64_t), val); 6063 6064 continue; 6065 } 6066 6067 case DTRACEACT_EXIT: { 6068 /* 6069 * For the exit action, we are going to attempt 6070 * to atomically set our activity to be 6071 * draining. If this fails (either because 6072 * another CPU has beat us to the exit action, 6073 * or because our current activity is something 6074 * other than ACTIVE or WARMUP), we will 6075 * continue. 
This assures that the exit action 6076 * can be successfully recorded at most once 6077 * when we're in the ACTIVE state. If we're 6078 * encountering the exit() action while in 6079 * COOLDOWN, however, we want to honor the new 6080 * status code. (We know that we're the only 6081 * thread in COOLDOWN, so there is no race.) 6082 */ 6083 void *activity = &state->dts_activity; 6084 dtrace_activity_t current = state->dts_activity; 6085 6086 if (current == DTRACE_ACTIVITY_COOLDOWN) 6087 break; 6088 6089 if (current != DTRACE_ACTIVITY_WARMUP) 6090 current = DTRACE_ACTIVITY_ACTIVE; 6091 6092 if (dtrace_cas32(activity, current, 6093 DTRACE_ACTIVITY_DRAINING) != current) { 6094 *flags |= CPU_DTRACE_DROP; 6095 continue; 6096 } 6097 6098 break; 6099 } 6100 6101 default: 6102 ASSERT(0); 6103 } 6104 6105 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6106 uintptr_t end = valoffs + size; 6107 6108 if (!dtrace_vcanload((void *)(uintptr_t)val, 6109 &dp->dtdo_rtype, &mstate, vstate)) 6110 continue; 6111 6112 /* 6113 * If this is a string, we're going to only 6114 * load until we find the zero byte -- after 6115 * which we'll store zero bytes. 6116 */ 6117 if (dp->dtdo_rtype.dtdt_kind == 6118 DIF_TYPE_STRING) { 6119 char c = '\0' + 1; 6120 int intuple = act->dta_intuple; 6121 size_t s; 6122 6123 for (s = 0; s < size; s++) { 6124 if (c != '\0') 6125 c = dtrace_load8(val++); 6126 6127 DTRACE_STORE(uint8_t, tomax, 6128 valoffs++, c); 6129 6130 if (c == '\0' && intuple) 6131 break; 6132 } 6133 6134 continue; 6135 } 6136 6137 while (valoffs < end) { 6138 DTRACE_STORE(uint8_t, tomax, valoffs++, 6139 dtrace_load8(val++)); 6140 } 6141 6142 continue; 6143 } 6144 6145 switch (size) { 6146 case 0: 6147 break; 6148 6149 case sizeof (uint8_t): 6150 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6151 break; 6152 case sizeof (uint16_t): 6153 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6154 break; 6155 case sizeof (uint32_t): 6156 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6157 break; 6158 case sizeof (uint64_t): 6159 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6160 break; 6161 default: 6162 /* 6163 * Any other size should have been returned by 6164 * reference, not by value. 6165 */ 6166 ASSERT(0); 6167 break; 6168 } 6169 } 6170 6171 if (*flags & CPU_DTRACE_DROP) 6172 continue; 6173 6174 if (*flags & CPU_DTRACE_FAULT) { 6175 int ndx; 6176 dtrace_action_t *err; 6177 6178 buf->dtb_errors++; 6179 6180 if (probe->dtpr_id == dtrace_probeid_error) { 6181 /* 6182 * There's nothing we can do -- we had an 6183 * error on the error probe. We bump an 6184 * error counter to at least indicate that 6185 * this condition happened. 6186 */ 6187 dtrace_error(&state->dts_dblerrors); 6188 continue; 6189 } 6190 6191 if (vtime) { 6192 /* 6193 * Before recursing on dtrace_probe(), we 6194 * need to explicitly clear out our start 6195 * time to prevent it from being accumulated 6196 * into t_dtrace_vtime. 6197 */ 6198 curthread->t_dtrace_start = 0; 6199 } 6200 6201 /* 6202 * Iterate over the actions to figure out which action 6203 * we were processing when we experienced the error. 6204 * Note that act points _past_ the faulting action; if 6205 * act is ecb->dte_action, the fault was in the 6206 * predicate, if it's ecb->dte_action->dta_next it's 6207 * in action #1, and so on. 6208 */ 6209 for (err = ecb->dte_action, ndx = 0; 6210 err != act; err = err->dta_next, ndx++) 6211 continue; 6212 6213 dtrace_probe_error(state, ecb->dte_epid, ndx, 6214 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
6215 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6216 cpu_core[cpuid].cpuc_dtrace_illval); 6217 6218 continue; 6219 } 6220 6221 if (!committed) 6222 buf->dtb_offset = offs + ecb->dte_size; 6223 } 6224 6225 if (vtime) 6226 curthread->t_dtrace_start = dtrace_gethrtime(); 6227 6228 dtrace_interrupt_enable(cookie); 6229 } 6230 6231 /* 6232 * DTrace Probe Hashing Functions 6233 * 6234 * The functions in this section (and indeed, the functions in remaining 6235 * sections) are not _called_ from probe context. (Any exceptions to this are 6236 * marked with a "Note:".) Rather, they are called from elsewhere in the 6237 * DTrace framework to look-up probes in, add probes to and remove probes from 6238 * the DTrace probe hashes. (Each probe is hashed by each element of the 6239 * probe tuple -- allowing for fast lookups, regardless of what was 6240 * specified.) 6241 */ 6242 static uint_t 6243 dtrace_hash_str(char *p) 6244 { 6245 unsigned int g; 6246 uint_t hval = 0; 6247 6248 while (*p) { 6249 hval = (hval << 4) + *p++; 6250 if ((g = (hval & 0xf0000000)) != 0) 6251 hval ^= g >> 24; 6252 hval &= ~g; 6253 } 6254 return (hval); 6255 } 6256 6257 static dtrace_hash_t * 6258 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6259 { 6260 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6261 6262 hash->dth_stroffs = stroffs; 6263 hash->dth_nextoffs = nextoffs; 6264 hash->dth_prevoffs = prevoffs; 6265 6266 hash->dth_size = 1; 6267 hash->dth_mask = hash->dth_size - 1; 6268 6269 hash->dth_tab = kmem_zalloc(hash->dth_size * 6270 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6271 6272 return (hash); 6273 } 6274 6275 static void 6276 dtrace_hash_destroy(dtrace_hash_t *hash) 6277 { 6278 #ifdef DEBUG 6279 int i; 6280 6281 for (i = 0; i < hash->dth_size; i++) 6282 ASSERT(hash->dth_tab[i] == NULL); 6283 #endif 6284 6285 kmem_free(hash->dth_tab, 6286 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6287 kmem_free(hash, sizeof (dtrace_hash_t)); 6288 } 6289 6290 static void 6291 dtrace_hash_resize(dtrace_hash_t *hash) 6292 { 6293 int size = hash->dth_size, i, ndx; 6294 int new_size = hash->dth_size << 1; 6295 int new_mask = new_size - 1; 6296 dtrace_hashbucket_t **new_tab, *bucket, *next; 6297 6298 ASSERT((new_size & new_mask) == 0); 6299 6300 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6301 6302 for (i = 0; i < size; i++) { 6303 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6304 dtrace_probe_t *probe = bucket->dthb_chain; 6305 6306 ASSERT(probe != NULL); 6307 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6308 6309 next = bucket->dthb_next; 6310 bucket->dthb_next = new_tab[ndx]; 6311 new_tab[ndx] = bucket; 6312 } 6313 } 6314 6315 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6316 hash->dth_tab = new_tab; 6317 hash->dth_size = new_size; 6318 hash->dth_mask = new_mask; 6319 } 6320 6321 static void 6322 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6323 { 6324 int hashval = DTRACE_HASHSTR(hash, new); 6325 int ndx = hashval & hash->dth_mask; 6326 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6327 dtrace_probe_t **nextp, **prevp; 6328 6329 for (; bucket != NULL; bucket = bucket->dthb_next) { 6330 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6331 goto add; 6332 } 6333 6334 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6335 dtrace_hash_resize(hash); 6336 dtrace_hash_add(hash, new); 6337 return; 6338 } 6339 6340 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6341 bucket->dthb_next = 
hash->dth_tab[ndx]; 6342 hash->dth_tab[ndx] = bucket; 6343 hash->dth_nbuckets++; 6344 6345 add: 6346 nextp = DTRACE_HASHNEXT(hash, new); 6347 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6348 *nextp = bucket->dthb_chain; 6349 6350 if (bucket->dthb_chain != NULL) { 6351 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6352 ASSERT(*prevp == NULL); 6353 *prevp = new; 6354 } 6355 6356 bucket->dthb_chain = new; 6357 bucket->dthb_len++; 6358 } 6359 6360 static dtrace_probe_t * 6361 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6362 { 6363 int hashval = DTRACE_HASHSTR(hash, template); 6364 int ndx = hashval & hash->dth_mask; 6365 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6366 6367 for (; bucket != NULL; bucket = bucket->dthb_next) { 6368 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6369 return (bucket->dthb_chain); 6370 } 6371 6372 return (NULL); 6373 } 6374 6375 static int 6376 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6377 { 6378 int hashval = DTRACE_HASHSTR(hash, template); 6379 int ndx = hashval & hash->dth_mask; 6380 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6381 6382 for (; bucket != NULL; bucket = bucket->dthb_next) { 6383 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6384 return (bucket->dthb_len); 6385 } 6386 6387 return (NULL); 6388 } 6389 6390 static void 6391 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6392 { 6393 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6394 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6395 6396 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6397 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6398 6399 /* 6400 * Find the bucket that we're removing this probe from. 6401 */ 6402 for (; bucket != NULL; bucket = bucket->dthb_next) { 6403 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6404 break; 6405 } 6406 6407 ASSERT(bucket != NULL); 6408 6409 if (*prevp == NULL) { 6410 if (*nextp == NULL) { 6411 /* 6412 * The removed probe was the only probe on this 6413 * bucket; we need to remove the bucket. 6414 */ 6415 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6416 6417 ASSERT(bucket->dthb_chain == probe); 6418 ASSERT(b != NULL); 6419 6420 if (b == bucket) { 6421 hash->dth_tab[ndx] = bucket->dthb_next; 6422 } else { 6423 while (b->dthb_next != bucket) 6424 b = b->dthb_next; 6425 b->dthb_next = bucket->dthb_next; 6426 } 6427 6428 ASSERT(hash->dth_nbuckets > 0); 6429 hash->dth_nbuckets--; 6430 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6431 return; 6432 } 6433 6434 bucket->dthb_chain = *nextp; 6435 } else { 6436 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6437 } 6438 6439 if (*nextp != NULL) 6440 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6441 } 6442 6443 /* 6444 * DTrace Utility Functions 6445 * 6446 * These are random utility functions that are _not_ called from probe context. 6447 */ 6448 static int 6449 dtrace_badattr(const dtrace_attribute_t *a) 6450 { 6451 return (a->dtat_name > DTRACE_STABILITY_MAX || 6452 a->dtat_data > DTRACE_STABILITY_MAX || 6453 a->dtat_class > DTRACE_CLASS_MAX); 6454 } 6455 6456 /* 6457 * Return a duplicate copy of a string. If the specified string is NULL, 6458 * this function returns a zero-length string. 6459 */ 6460 static char * 6461 dtrace_strdup(const char *str) 6462 { 6463 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 6464 6465 if (str != NULL) 6466 (void) strcpy(new, str); 6467 6468 return (new); 6469 } 6470 6471 #define DTRACE_ISALPHA(c) \ 6472 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6473 6474 static int 6475 dtrace_badname(const char *s) 6476 { 6477 char c; 6478 6479 if (s == NULL || (c = *s++) == '\0') 6480 return (0); 6481 6482 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6483 return (1); 6484 6485 while ((c = *s++) != '\0') { 6486 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6487 c != '-' && c != '_' && c != '.' && c != '`') 6488 return (1); 6489 } 6490 6491 return (0); 6492 } 6493 6494 static void 6495 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6496 { 6497 uint32_t priv; 6498 6499 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6500 /* 6501 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6502 */ 6503 priv = DTRACE_PRIV_ALL; 6504 } else { 6505 *uidp = crgetuid(cr); 6506 *zoneidp = crgetzoneid(cr); 6507 6508 priv = 0; 6509 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6510 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6511 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6512 priv |= DTRACE_PRIV_USER; 6513 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6514 priv |= DTRACE_PRIV_PROC; 6515 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6516 priv |= DTRACE_PRIV_OWNER; 6517 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6518 priv |= DTRACE_PRIV_ZONEOWNER; 6519 } 6520 6521 *privp = priv; 6522 } 6523 6524 #ifdef DTRACE_ERRDEBUG 6525 static void 6526 dtrace_errdebug(const char *str) 6527 { 6528 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6529 int occupied = 0; 6530 6531 mutex_enter(&dtrace_errlock); 6532 dtrace_errlast = str; 6533 dtrace_errthread = curthread; 6534 6535 while (occupied++ < DTRACE_ERRHASHSZ) { 6536 if (dtrace_errhash[hval].dter_msg == str) { 6537 dtrace_errhash[hval].dter_count++; 6538 goto out; 6539 } 6540 6541 if (dtrace_errhash[hval].dter_msg != NULL) { 6542 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6543 continue; 6544 } 6545 6546 dtrace_errhash[hval].dter_msg = str; 6547 dtrace_errhash[hval].dter_count = 1; 6548 goto out; 6549 } 6550 6551 panic("dtrace: undersized error hash"); 6552 out: 6553 mutex_exit(&dtrace_errlock); 6554 } 6555 #endif 6556 6557 /* 6558 * DTrace Matching Functions 6559 * 6560 * These functions are used to match groups of probes, given some elements of 6561 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6562 */ 6563 static int 6564 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6565 zoneid_t zoneid) 6566 { 6567 if (priv != DTRACE_PRIV_ALL) { 6568 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6569 uint32_t match = priv & ppriv; 6570 6571 /* 6572 * No PRIV_DTRACE_* privileges... 6573 */ 6574 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6575 DTRACE_PRIV_KERNEL)) == 0) 6576 return (0); 6577 6578 /* 6579 * No matching bits, but there were bits to match... 6580 */ 6581 if (match == 0 && ppriv != 0) 6582 return (0); 6583 6584 /* 6585 * Need to have permissions to the process, but don't... 6586 */ 6587 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6588 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6589 return (0); 6590 } 6591 6592 /* 6593 * Need to be in the same zone unless we possess the 6594 * privilege to examine all zones. 
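 *
 * (Illustrative example, not part of the original comment: if the
 * provider registered with dtpp_flags containing DTRACE_PRIV_PROC |
 * DTRACE_PRIV_OWNER and the consumer only holds DTRACE_PRIV_PROC,
 * then match == DTRACE_PRIV_PROC while (ppriv & ~match) still
 * contains DTRACE_PRIV_OWNER, so the uid comparison above must
 * succeed; the check below applies the same reasoning to
 * DTRACE_PRIV_ZONEOWNER and dtpp_zoneid.)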
6595 */ 6596 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6597 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6598 return (0); 6599 } 6600 } 6601 6602 return (1); 6603 } 6604 6605 /* 6606 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6607 * consists of input pattern strings and an ops-vector to evaluate them. 6608 * This function returns >0 for match, 0 for no match, and <0 for error. 6609 */ 6610 static int 6611 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6612 uint32_t priv, uid_t uid, zoneid_t zoneid) 6613 { 6614 dtrace_provider_t *pvp = prp->dtpr_provider; 6615 int rv; 6616 6617 if (pvp->dtpv_defunct) 6618 return (0); 6619 6620 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6621 return (rv); 6622 6623 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6624 return (rv); 6625 6626 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6627 return (rv); 6628 6629 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6630 return (rv); 6631 6632 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6633 return (0); 6634 6635 return (rv); 6636 } 6637 6638 /* 6639 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6640 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6641 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6642 * In addition, all of the recursion cases except for '*' matching have been 6643 * unwound. For '*', we still implement recursive evaluation, but a depth 6644 * counter is maintained and matching is aborted if we recurse too deep. 6645 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6646 */ 6647 static int 6648 dtrace_match_glob(const char *s, const char *p, int depth) 6649 { 6650 const char *olds; 6651 char s1, c; 6652 int gs; 6653 6654 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6655 return (-1); 6656 6657 if (s == NULL) 6658 s = ""; /* treat NULL as empty string */ 6659 6660 top: 6661 olds = s; 6662 s1 = *s++; 6663 6664 if (p == NULL) 6665 return (0); 6666 6667 if ((c = *p++) == '\0') 6668 return (s1 == '\0'); 6669 6670 switch (c) { 6671 case '[': { 6672 int ok = 0, notflag = 0; 6673 char lc = '\0'; 6674 6675 if (s1 == '\0') 6676 return (0); 6677 6678 if (*p == '!') { 6679 notflag = 1; 6680 p++; 6681 } 6682 6683 if ((c = *p++) == '\0') 6684 return (0); 6685 6686 do { 6687 if (c == '-' && lc != '\0' && *p != ']') { 6688 if ((c = *p++) == '\0') 6689 return (0); 6690 if (c == '\\' && (c = *p++) == '\0') 6691 return (0); 6692 6693 if (notflag) { 6694 if (s1 < lc || s1 > c) 6695 ok++; 6696 else 6697 return (0); 6698 } else if (lc <= s1 && s1 <= c) 6699 ok++; 6700 6701 } else if (c == '\\' && (c = *p++) == '\0') 6702 return (0); 6703 6704 lc = c; /* save left-hand 'c' for next iteration */ 6705 6706 if (notflag) { 6707 if (s1 != c) 6708 ok++; 6709 else 6710 return (0); 6711 } else if (s1 == c) 6712 ok++; 6713 6714 if ((c = *p++) == '\0') 6715 return (0); 6716 6717 } while (c != ']'); 6718 6719 if (ok) 6720 goto top; 6721 6722 return (0); 6723 } 6724 6725 case '\\': 6726 if ((c = *p++) == '\0') 6727 return (0); 6728 /*FALLTHRU*/ 6729 6730 default: 6731 if (c != s1) 6732 return (0); 6733 /*FALLTHRU*/ 6734 6735 case '?': 6736 if (s1 != '\0') 6737 goto top; 6738 return (0); 6739 6740 case '*': 6741 while (*p == '*') 6742 p++; /* consecutive *'s are identical to a single one */ 6743 6744 if (*p == '\0') 6745 return (1); 6746 6747 for (s = 
olds; *s != '\0'; s++) { 6748 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 6749 return (gs); 6750 } 6751 6752 return (0); 6753 } 6754 } 6755 6756 /*ARGSUSED*/ 6757 static int 6758 dtrace_match_string(const char *s, const char *p, int depth) 6759 { 6760 return (s != NULL && strcmp(s, p) == 0); 6761 } 6762 6763 /*ARGSUSED*/ 6764 static int 6765 dtrace_match_nul(const char *s, const char *p, int depth) 6766 { 6767 return (1); /* always match the empty pattern */ 6768 } 6769 6770 /*ARGSUSED*/ 6771 static int 6772 dtrace_match_nonzero(const char *s, const char *p, int depth) 6773 { 6774 return (s != NULL && s[0] != '\0'); 6775 } 6776 6777 static int 6778 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 6779 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 6780 { 6781 dtrace_probe_t template, *probe; 6782 dtrace_hash_t *hash = NULL; 6783 int len, rc, best = INT_MAX, nmatched = 0; 6784 dtrace_id_t i; 6785 6786 ASSERT(MUTEX_HELD(&dtrace_lock)); 6787 6788 /* 6789 * If the probe ID is specified in the key, just lookup by ID and 6790 * invoke the match callback once if a matching probe is found. 6791 */ 6792 if (pkp->dtpk_id != DTRACE_IDNONE) { 6793 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 6794 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 6795 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL) 6796 return (DTRACE_MATCH_FAIL); 6797 nmatched++; 6798 } 6799 return (nmatched); 6800 } 6801 6802 template.dtpr_mod = (char *)pkp->dtpk_mod; 6803 template.dtpr_func = (char *)pkp->dtpk_func; 6804 template.dtpr_name = (char *)pkp->dtpk_name; 6805 6806 /* 6807 * We want to find the most distinct of the module name, function 6808 * name, and name. So for each one that is not a glob pattern or 6809 * empty string, we perform a lookup in the corresponding hash and 6810 * use the hash table with the fewest collisions to do our search. 6811 */ 6812 if (pkp->dtpk_mmatch == &dtrace_match_string && 6813 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 6814 best = len; 6815 hash = dtrace_bymod; 6816 } 6817 6818 if (pkp->dtpk_fmatch == &dtrace_match_string && 6819 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 6820 best = len; 6821 hash = dtrace_byfunc; 6822 } 6823 6824 if (pkp->dtpk_nmatch == &dtrace_match_string && 6825 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 6826 best = len; 6827 hash = dtrace_byname; 6828 } 6829 6830 /* 6831 * If we did not select a hash table, iterate over every probe and 6832 * invoke our callback for each one that matches our input probe key. 6833 */ 6834 if (hash == NULL) { 6835 for (i = 0; i < dtrace_nprobes; i++) { 6836 if ((probe = dtrace_probes[i]) == NULL || 6837 dtrace_match_probe(probe, pkp, priv, uid, 6838 zoneid) <= 0) 6839 continue; 6840 6841 nmatched++; 6842 6843 if ((rc = (*matched)(probe, arg)) != 6844 DTRACE_MATCH_NEXT) { 6845 if (rc == DTRACE_MATCH_FAIL) 6846 return (DTRACE_MATCH_FAIL); 6847 break; 6848 } 6849 } 6850 6851 return (nmatched); 6852 } 6853 6854 /* 6855 * If we selected a hash table, iterate over each probe of the same key 6856 * name and invoke the callback for every probe that matches the other 6857 * attributes of our input probe key. 
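 *
 * (Illustrative example, not part of the original comment: for a
 * description such as fbt:genunix:kmem_alloc:entry, the dtrace_byname
 * chain for "entry" is typically enormous while the dtrace_byfunc
 * chain for "kmem_alloc" holds only a couple of probes, so
 * dtrace_hash_collisions() above steers us to dtrace_byfunc and the
 * loop below walks just that short chain.  The probe names here are
 * only examples.)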
6858 */ 6859 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6860 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6861 6862 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6863 continue; 6864 6865 nmatched++; 6866 6867 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { 6868 if (rc == DTRACE_MATCH_FAIL) 6869 return (DTRACE_MATCH_FAIL); 6870 break; 6871 } 6872 } 6873 6874 return (nmatched); 6875 } 6876 6877 /* 6878 * Return the function pointer dtrace_probecmp() should use to compare the 6879 * specified pattern with a string. For NULL or empty patterns, we select 6880 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6881 * For non-empty non-glob strings, we use dtrace_match_string(). 6882 */ 6883 static dtrace_probekey_f * 6884 dtrace_probekey_func(const char *p) 6885 { 6886 char c; 6887 6888 if (p == NULL || *p == '\0') 6889 return (&dtrace_match_nul); 6890 6891 while ((c = *p++) != '\0') { 6892 if (c == '[' || c == '?' || c == '*' || c == '\\') 6893 return (&dtrace_match_glob); 6894 } 6895 6896 return (&dtrace_match_string); 6897 } 6898 6899 /* 6900 * Build a probe comparison key for use with dtrace_match_probe() from the 6901 * given probe description. By convention, a null key only matches anchored 6902 * probes: if each field is the empty string, reset dtpk_fmatch to 6903 * dtrace_match_nonzero(). 6904 */ 6905 static void 6906 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6907 { 6908 pkp->dtpk_prov = pdp->dtpd_provider; 6909 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6910 6911 pkp->dtpk_mod = pdp->dtpd_mod; 6912 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6913 6914 pkp->dtpk_func = pdp->dtpd_func; 6915 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6916 6917 pkp->dtpk_name = pdp->dtpd_name; 6918 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6919 6920 pkp->dtpk_id = pdp->dtpd_id; 6921 6922 if (pkp->dtpk_id == DTRACE_IDNONE && 6923 pkp->dtpk_pmatch == &dtrace_match_nul && 6924 pkp->dtpk_mmatch == &dtrace_match_nul && 6925 pkp->dtpk_fmatch == &dtrace_match_nul && 6926 pkp->dtpk_nmatch == &dtrace_match_nul) 6927 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6928 } 6929 6930 /* 6931 * DTrace Provider-to-Framework API Functions 6932 * 6933 * These functions implement much of the Provider-to-Framework API, as 6934 * described in <sys/dtrace.h>. The parts of the API not in this section are 6935 * the functions in the API for probe management (found below), and 6936 * dtrace_probe() itself (found above). 6937 */ 6938 6939 /* 6940 * Register the calling provider with the DTrace framework. This should 6941 * generally be called by DTrace providers in their attach(9E) entry point. 6942 */ 6943 int 6944 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6945 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6946 { 6947 dtrace_provider_t *provider; 6948 6949 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6950 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6951 "arguments", name ? 
name : "<NULL>"); 6952 return (EINVAL); 6953 } 6954 6955 if (name[0] == '\0' || dtrace_badname(name)) { 6956 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6957 "provider name", name); 6958 return (EINVAL); 6959 } 6960 6961 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6962 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6963 pops->dtps_destroy == NULL || 6964 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6965 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6966 "provider ops", name); 6967 return (EINVAL); 6968 } 6969 6970 if (dtrace_badattr(&pap->dtpa_provider) || 6971 dtrace_badattr(&pap->dtpa_mod) || 6972 dtrace_badattr(&pap->dtpa_func) || 6973 dtrace_badattr(&pap->dtpa_name) || 6974 dtrace_badattr(&pap->dtpa_args)) { 6975 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6976 "provider attributes", name); 6977 return (EINVAL); 6978 } 6979 6980 if (priv & ~DTRACE_PRIV_ALL) { 6981 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6982 "privilege attributes", name); 6983 return (EINVAL); 6984 } 6985 6986 if ((priv & DTRACE_PRIV_KERNEL) && 6987 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 6988 pops->dtps_usermode == NULL) { 6989 cmn_err(CE_WARN, "failed to register provider '%s': need " 6990 "dtps_usermode() op for given privilege attributes", name); 6991 return (EINVAL); 6992 } 6993 6994 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 6995 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6996 (void) strcpy(provider->dtpv_name, name); 6997 6998 provider->dtpv_attr = *pap; 6999 provider->dtpv_priv.dtpp_flags = priv; 7000 if (cr != NULL) { 7001 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7002 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7003 } 7004 provider->dtpv_pops = *pops; 7005 7006 if (pops->dtps_provide == NULL) { 7007 ASSERT(pops->dtps_provide_module != NULL); 7008 provider->dtpv_pops.dtps_provide = 7009 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 7010 } 7011 7012 if (pops->dtps_provide_module == NULL) { 7013 ASSERT(pops->dtps_provide != NULL); 7014 provider->dtpv_pops.dtps_provide_module = 7015 (void (*)(void *, struct modctl *))dtrace_nullop; 7016 } 7017 7018 if (pops->dtps_suspend == NULL) { 7019 ASSERT(pops->dtps_resume == NULL); 7020 provider->dtpv_pops.dtps_suspend = 7021 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7022 provider->dtpv_pops.dtps_resume = 7023 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7024 } 7025 7026 provider->dtpv_arg = arg; 7027 *idp = (dtrace_provider_id_t)provider; 7028 7029 if (pops == &dtrace_provider_ops) { 7030 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7031 ASSERT(MUTEX_HELD(&dtrace_lock)); 7032 ASSERT(dtrace_anon.dta_enabling == NULL); 7033 7034 /* 7035 * We make sure that the DTrace provider is at the head of 7036 * the provider chain. 7037 */ 7038 provider->dtpv_next = dtrace_provider; 7039 dtrace_provider = provider; 7040 return (0); 7041 } 7042 7043 mutex_enter(&dtrace_provider_lock); 7044 mutex_enter(&dtrace_lock); 7045 7046 /* 7047 * If there is at least one provider registered, we'll add this 7048 * provider after the first provider. 
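 *
 * (Illustrative note: the head of the chain is the "dtrace" provider
 * itself -- see the pops == &dtrace_provider_ops case above -- so if
 * "fbt" registers and then "sdt" registers, the chain reads
 * dtrace -> sdt -> fbt: each new provider is spliced in directly
 * behind the head rather than appended at the tail.)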
7049 */ 7050 if (dtrace_provider != NULL) { 7051 provider->dtpv_next = dtrace_provider->dtpv_next; 7052 dtrace_provider->dtpv_next = provider; 7053 } else { 7054 dtrace_provider = provider; 7055 } 7056 7057 if (dtrace_retained != NULL) { 7058 dtrace_enabling_provide(provider); 7059 7060 /* 7061 * Now we need to call dtrace_enabling_matchall() -- which 7062 * will acquire cpu_lock and dtrace_lock. We therefore need 7063 * to drop all of our locks before calling into it... 7064 */ 7065 mutex_exit(&dtrace_lock); 7066 mutex_exit(&dtrace_provider_lock); 7067 dtrace_enabling_matchall(); 7068 7069 return (0); 7070 } 7071 7072 mutex_exit(&dtrace_lock); 7073 mutex_exit(&dtrace_provider_lock); 7074 7075 return (0); 7076 } 7077 7078 /* 7079 * Unregister the specified provider from the DTrace framework. This should 7080 * generally be called by DTrace providers in their detach(9E) entry point. 7081 */ 7082 int 7083 dtrace_unregister(dtrace_provider_id_t id) 7084 { 7085 dtrace_provider_t *old = (dtrace_provider_t *)id; 7086 dtrace_provider_t *prev = NULL; 7087 int i, self = 0; 7088 dtrace_probe_t *probe, *first = NULL; 7089 7090 if (old->dtpv_pops.dtps_enable == 7091 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) { 7092 /* 7093 * If DTrace itself is the provider, we're called with locks 7094 * already held. 7095 */ 7096 ASSERT(old == dtrace_provider); 7097 ASSERT(dtrace_devi != NULL); 7098 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7099 ASSERT(MUTEX_HELD(&dtrace_lock)); 7100 self = 1; 7101 7102 if (dtrace_provider->dtpv_next != NULL) { 7103 /* 7104 * There's another provider here; return failure. 7105 */ 7106 return (EBUSY); 7107 } 7108 } else { 7109 mutex_enter(&dtrace_provider_lock); 7110 mutex_enter(&mod_lock); 7111 mutex_enter(&dtrace_lock); 7112 } 7113 7114 /* 7115 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7116 * probes, we refuse to let providers slither away, unless this 7117 * provider has already been explicitly invalidated. 7118 */ 7119 if (!old->dtpv_defunct && 7120 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7121 dtrace_anon.dta_state->dts_necbs > 0))) { 7122 if (!self) { 7123 mutex_exit(&dtrace_lock); 7124 mutex_exit(&mod_lock); 7125 mutex_exit(&dtrace_provider_lock); 7126 } 7127 return (EBUSY); 7128 } 7129 7130 /* 7131 * Attempt to destroy the probes associated with this provider. 7132 */ 7133 for (i = 0; i < dtrace_nprobes; i++) { 7134 if ((probe = dtrace_probes[i]) == NULL) 7135 continue; 7136 7137 if (probe->dtpr_provider != old) 7138 continue; 7139 7140 if (probe->dtpr_ecb == NULL) 7141 continue; 7142 7143 /* 7144 * We have at least one ECB; we can't remove this provider. 7145 */ 7146 if (!self) { 7147 mutex_exit(&dtrace_lock); 7148 mutex_exit(&mod_lock); 7149 mutex_exit(&dtrace_provider_lock); 7150 } 7151 return (EBUSY); 7152 } 7153 7154 /* 7155 * All of the probes for this provider are disabled; we can safely 7156 * remove all of them from their hash chains and from the probe array. 
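 *
 * (Illustrative note on the ordering that follows, in outline:
 *
 *	dtrace_probes[i] = NULL;	unlink from the array and hashes
 *	dtrace_sync();			wait for in-flight probe context
 *	dtps_destroy(), kmem_free()	now safe to tear down
 *
 * The probes are unlinked while the locks are held, dtrace_sync()
 * waits out any CPU that may still be referencing them from probe
 * context, and only then are the destroy and free calls made.)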
7157 */ 7158 for (i = 0; i < dtrace_nprobes; i++) { 7159 if ((probe = dtrace_probes[i]) == NULL) 7160 continue; 7161 7162 if (probe->dtpr_provider != old) 7163 continue; 7164 7165 dtrace_probes[i] = NULL; 7166 7167 dtrace_hash_remove(dtrace_bymod, probe); 7168 dtrace_hash_remove(dtrace_byfunc, probe); 7169 dtrace_hash_remove(dtrace_byname, probe); 7170 7171 if (first == NULL) { 7172 first = probe; 7173 probe->dtpr_nextmod = NULL; 7174 } else { 7175 probe->dtpr_nextmod = first; 7176 first = probe; 7177 } 7178 } 7179 7180 /* 7181 * The provider's probes have been removed from the hash chains and 7182 * from the probe array. Now issue a dtrace_sync() to be sure that 7183 * everyone has cleared out from any probe array processing. 7184 */ 7185 dtrace_sync(); 7186 7187 for (probe = first; probe != NULL; probe = first) { 7188 first = probe->dtpr_nextmod; 7189 7190 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7191 probe->dtpr_arg); 7192 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7193 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7194 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7195 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7196 kmem_free(probe, sizeof (dtrace_probe_t)); 7197 } 7198 7199 if ((prev = dtrace_provider) == old) { 7200 ASSERT(self || dtrace_devi == NULL); 7201 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7202 dtrace_provider = old->dtpv_next; 7203 } else { 7204 while (prev != NULL && prev->dtpv_next != old) 7205 prev = prev->dtpv_next; 7206 7207 if (prev == NULL) { 7208 panic("attempt to unregister non-existent " 7209 "dtrace provider %p\n", (void *)id); 7210 } 7211 7212 prev->dtpv_next = old->dtpv_next; 7213 } 7214 7215 if (!self) { 7216 mutex_exit(&dtrace_lock); 7217 mutex_exit(&mod_lock); 7218 mutex_exit(&dtrace_provider_lock); 7219 } 7220 7221 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7222 kmem_free(old, sizeof (dtrace_provider_t)); 7223 7224 return (0); 7225 } 7226 7227 /* 7228 * Invalidate the specified provider. All subsequent probe lookups for the 7229 * specified provider will fail, but its probes will not be removed. 7230 */ 7231 void 7232 dtrace_invalidate(dtrace_provider_id_t id) 7233 { 7234 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7235 7236 ASSERT(pvp->dtpv_pops.dtps_enable != 7237 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7238 7239 mutex_enter(&dtrace_provider_lock); 7240 mutex_enter(&dtrace_lock); 7241 7242 pvp->dtpv_defunct = 1; 7243 7244 mutex_exit(&dtrace_lock); 7245 mutex_exit(&dtrace_provider_lock); 7246 } 7247 7248 /* 7249 * Indicate whether or not DTrace has attached. 7250 */ 7251 int 7252 dtrace_attached(void) 7253 { 7254 /* 7255 * dtrace_provider will be non-NULL iff the DTrace driver has 7256 * attached. (It's non-NULL because DTrace is always itself a 7257 * provider.) 7258 */ 7259 return (dtrace_provider != NULL); 7260 } 7261 7262 /* 7263 * Remove all the unenabled probes for the given provider. This function is 7264 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7265 * -- just as many of its associated probes as it can. 7266 */ 7267 int 7268 dtrace_condense(dtrace_provider_id_t id) 7269 { 7270 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7271 int i; 7272 dtrace_probe_t *probe; 7273 7274 /* 7275 * Make sure this isn't the dtrace provider itself. 
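 *
 * (Illustrative usage sketch, not taken from this file: a provider
 * that autocreates large numbers of probes might periodically reap
 * the never-enabled ones with
 *
 *	(void) dtrace_condense(my_provider_id);
 *
 * where my_provider_id is the hypothetical id it obtained from
 * dtrace_register().)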
7276 */ 7277 ASSERT(prov->dtpv_pops.dtps_enable != 7278 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7279 7280 mutex_enter(&dtrace_provider_lock); 7281 mutex_enter(&dtrace_lock); 7282 7283 /* 7284 * Attempt to destroy the probes associated with this provider. 7285 */ 7286 for (i = 0; i < dtrace_nprobes; i++) { 7287 if ((probe = dtrace_probes[i]) == NULL) 7288 continue; 7289 7290 if (probe->dtpr_provider != prov) 7291 continue; 7292 7293 if (probe->dtpr_ecb != NULL) 7294 continue; 7295 7296 dtrace_probes[i] = NULL; 7297 7298 dtrace_hash_remove(dtrace_bymod, probe); 7299 dtrace_hash_remove(dtrace_byfunc, probe); 7300 dtrace_hash_remove(dtrace_byname, probe); 7301 7302 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7303 probe->dtpr_arg); 7304 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7305 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7306 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7307 kmem_free(probe, sizeof (dtrace_probe_t)); 7308 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7309 } 7310 7311 mutex_exit(&dtrace_lock); 7312 mutex_exit(&dtrace_provider_lock); 7313 7314 return (0); 7315 } 7316 7317 /* 7318 * DTrace Probe Management Functions 7319 * 7320 * The functions in this section perform the DTrace probe management, 7321 * including functions to create probes, look-up probes, and call into the 7322 * providers to request that probes be provided. Some of these functions are 7323 * in the Provider-to-Framework API; these functions can be identified by the 7324 * fact that they are not declared "static". 7325 */ 7326 7327 /* 7328 * Create a probe with the specified module name, function name, and name. 7329 */ 7330 dtrace_id_t 7331 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7332 const char *func, const char *name, int aframes, void *arg) 7333 { 7334 dtrace_probe_t *probe, **probes; 7335 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7336 dtrace_id_t id; 7337 7338 if (provider == dtrace_provider) { 7339 ASSERT(MUTEX_HELD(&dtrace_lock)); 7340 } else { 7341 mutex_enter(&dtrace_lock); 7342 } 7343 7344 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7345 VM_BESTFIT | VM_SLEEP); 7346 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7347 7348 probe->dtpr_id = id; 7349 probe->dtpr_gen = dtrace_probegen++; 7350 probe->dtpr_mod = dtrace_strdup(mod); 7351 probe->dtpr_func = dtrace_strdup(func); 7352 probe->dtpr_name = dtrace_strdup(name); 7353 probe->dtpr_arg = arg; 7354 probe->dtpr_aframes = aframes; 7355 probe->dtpr_provider = provider; 7356 7357 dtrace_hash_add(dtrace_bymod, probe); 7358 dtrace_hash_add(dtrace_byfunc, probe); 7359 dtrace_hash_add(dtrace_byname, probe); 7360 7361 if (id - 1 >= dtrace_nprobes) { 7362 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7363 size_t nsize = osize << 1; 7364 7365 if (nsize == 0) { 7366 ASSERT(osize == 0); 7367 ASSERT(dtrace_probes == NULL); 7368 nsize = sizeof (dtrace_probe_t *); 7369 } 7370 7371 probes = kmem_zalloc(nsize, KM_SLEEP); 7372 7373 if (dtrace_probes == NULL) { 7374 ASSERT(osize == 0); 7375 dtrace_probes = probes; 7376 dtrace_nprobes = 1; 7377 } else { 7378 dtrace_probe_t **oprobes = dtrace_probes; 7379 7380 bcopy(oprobes, probes, osize); 7381 dtrace_membar_producer(); 7382 dtrace_probes = probes; 7383 7384 dtrace_sync(); 7385 7386 /* 7387 * All CPUs are now seeing the new probes array; we can 7388 * safely free the old array. 
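 *
 * (Illustrative note: dtrace_probes[] only ever doubles -- 1, 2, 4,
 * 8, ... slots -- and is grown copy-on-write: the old entries are
 * bcopy()'d into the new array, dtrace_membar_producer() orders those
 * stores ahead of the dtrace_probes pointer switch, and the
 * dtrace_sync() above ensures that no CPU can still be using the old
 * pointer from probe context before it is freed here.)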
7389 */ 7390 kmem_free(oprobes, osize); 7391 dtrace_nprobes <<= 1; 7392 } 7393 7394 ASSERT(id - 1 < dtrace_nprobes); 7395 } 7396 7397 ASSERT(dtrace_probes[id - 1] == NULL); 7398 dtrace_probes[id - 1] = probe; 7399 7400 if (provider != dtrace_provider) 7401 mutex_exit(&dtrace_lock); 7402 7403 return (id); 7404 } 7405 7406 static dtrace_probe_t * 7407 dtrace_probe_lookup_id(dtrace_id_t id) 7408 { 7409 ASSERT(MUTEX_HELD(&dtrace_lock)); 7410 7411 if (id == 0 || id > dtrace_nprobes) 7412 return (NULL); 7413 7414 return (dtrace_probes[id - 1]); 7415 } 7416 7417 static int 7418 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7419 { 7420 *((dtrace_id_t *)arg) = probe->dtpr_id; 7421 7422 return (DTRACE_MATCH_DONE); 7423 } 7424 7425 /* 7426 * Look up a probe based on provider and one or more of module name, function 7427 * name and probe name. 7428 */ 7429 dtrace_id_t 7430 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 7431 const char *func, const char *name) 7432 { 7433 dtrace_probekey_t pkey; 7434 dtrace_id_t id; 7435 int match; 7436 7437 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7438 pkey.dtpk_pmatch = &dtrace_match_string; 7439 pkey.dtpk_mod = mod; 7440 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7441 pkey.dtpk_func = func; 7442 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7443 pkey.dtpk_name = name; 7444 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7445 pkey.dtpk_id = DTRACE_IDNONE; 7446 7447 mutex_enter(&dtrace_lock); 7448 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7449 dtrace_probe_lookup_match, &id); 7450 mutex_exit(&dtrace_lock); 7451 7452 ASSERT(match == 1 || match == 0); 7453 return (match ? id : 0); 7454 } 7455 7456 /* 7457 * Returns the probe argument associated with the specified probe. 7458 */ 7459 void * 7460 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7461 { 7462 dtrace_probe_t *probe; 7463 void *rval = NULL; 7464 7465 mutex_enter(&dtrace_lock); 7466 7467 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7468 probe->dtpr_provider == (dtrace_provider_t *)id) 7469 rval = probe->dtpr_arg; 7470 7471 mutex_exit(&dtrace_lock); 7472 7473 return (rval); 7474 } 7475 7476 /* 7477 * Copy a probe into a probe description. 7478 */ 7479 static void 7480 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7481 { 7482 bzero(pdp, sizeof (dtrace_probedesc_t)); 7483 pdp->dtpd_id = prp->dtpr_id; 7484 7485 (void) strncpy(pdp->dtpd_provider, 7486 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7487 7488 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7489 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7490 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7491 } 7492 7493 /* 7494 * Called to indicate that a probe -- or probes -- should be provided by a 7495 * specfied provider. If the specified description is NULL, the provider will 7496 * be told to provide all of its probes. (This is done whenever a new 7497 * consumer comes along, or whenever a retained enabling is to be matched.) If 7498 * the specified description is non-NULL, the provider is given the 7499 * opportunity to dynamically provide the specified probe, allowing providers 7500 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7501 * probes.) 
If the provider is NULL, the operations will be applied to all 7502 * providers; if the provider is non-NULL the operations will only be applied 7503 * to the specified provider. The dtrace_provider_lock must be held, and the 7504 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7505 * will need to grab the dtrace_lock when it reenters the framework through 7506 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7507 */ 7508 static void 7509 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7510 { 7511 struct modctl *ctl; 7512 int all = 0; 7513 7514 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7515 7516 if (prv == NULL) { 7517 all = 1; 7518 prv = dtrace_provider; 7519 } 7520 7521 do { 7522 /* 7523 * First, call the blanket provide operation. 7524 */ 7525 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7526 7527 /* 7528 * Now call the per-module provide operation. We will grab 7529 * mod_lock to prevent the list from being modified. Note 7530 * that this also prevents the mod_busy bits from changing. 7531 * (mod_busy can only be changed with mod_lock held.) 7532 */ 7533 mutex_enter(&mod_lock); 7534 7535 ctl = &modules; 7536 do { 7537 if (ctl->mod_busy || ctl->mod_mp == NULL) 7538 continue; 7539 7540 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7541 7542 } while ((ctl = ctl->mod_next) != &modules); 7543 7544 mutex_exit(&mod_lock); 7545 } while (all && (prv = prv->dtpv_next) != NULL); 7546 } 7547 7548 /* 7549 * Iterate over each probe, and call the Framework-to-Provider API function 7550 * denoted by offs. 7551 */ 7552 static void 7553 dtrace_probe_foreach(uintptr_t offs) 7554 { 7555 dtrace_provider_t *prov; 7556 void (*func)(void *, dtrace_id_t, void *); 7557 dtrace_probe_t *probe; 7558 dtrace_icookie_t cookie; 7559 int i; 7560 7561 /* 7562 * We disable interrupts to walk through the probe array. This is 7563 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7564 * won't see stale data. 7565 */ 7566 cookie = dtrace_interrupt_disable(); 7567 7568 for (i = 0; i < dtrace_nprobes; i++) { 7569 if ((probe = dtrace_probes[i]) == NULL) 7570 continue; 7571 7572 if (probe->dtpr_ecb == NULL) { 7573 /* 7574 * This probe isn't enabled -- don't call the function. 7575 */ 7576 continue; 7577 } 7578 7579 prov = probe->dtpr_provider; 7580 func = *((void(**)(void *, dtrace_id_t, void *)) 7581 ((uintptr_t)&prov->dtpv_pops + offs)); 7582 7583 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7584 } 7585 7586 dtrace_interrupt_enable(cookie); 7587 } 7588 7589 static int 7590 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7591 { 7592 dtrace_probekey_t pkey; 7593 uint32_t priv; 7594 uid_t uid; 7595 zoneid_t zoneid; 7596 7597 ASSERT(MUTEX_HELD(&dtrace_lock)); 7598 dtrace_ecb_create_cache = NULL; 7599 7600 if (desc == NULL) { 7601 /* 7602 * If we're passed a NULL description, we're being asked to 7603 * create an ECB with a NULL probe. 
7604 */ 7605 (void) dtrace_ecb_create_enable(NULL, enab); 7606 return (0); 7607 } 7608 7609 dtrace_probekey(desc, &pkey); 7610 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7611 &priv, &uid, &zoneid); 7612 7613 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7614 enab)); 7615 } 7616 7617 /* 7618 * DTrace Helper Provider Functions 7619 */ 7620 static void 7621 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7622 { 7623 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7624 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7625 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7626 } 7627 7628 static void 7629 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7630 const dof_provider_t *dofprov, char *strtab) 7631 { 7632 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7633 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7634 dofprov->dofpv_provattr); 7635 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7636 dofprov->dofpv_modattr); 7637 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7638 dofprov->dofpv_funcattr); 7639 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7640 dofprov->dofpv_nameattr); 7641 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7642 dofprov->dofpv_argsattr); 7643 } 7644 7645 static void 7646 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7647 { 7648 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7649 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7650 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7651 dof_provider_t *provider; 7652 dof_probe_t *probe; 7653 uint32_t *off, *enoff; 7654 uint8_t *arg; 7655 char *strtab; 7656 uint_t i, nprobes; 7657 dtrace_helper_provdesc_t dhpv; 7658 dtrace_helper_probedesc_t dhpb; 7659 dtrace_meta_t *meta = dtrace_meta_pid; 7660 dtrace_mops_t *mops = &meta->dtm_mops; 7661 void *parg; 7662 7663 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7664 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7665 provider->dofpv_strtab * dof->dofh_secsize); 7666 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7667 provider->dofpv_probes * dof->dofh_secsize); 7668 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7669 provider->dofpv_prargs * dof->dofh_secsize); 7670 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7671 provider->dofpv_proffs * dof->dofh_secsize); 7672 7673 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7674 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7675 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7676 enoff = NULL; 7677 7678 /* 7679 * See dtrace_helper_provider_validate(). 7680 */ 7681 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7682 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7683 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7684 provider->dofpv_prenoffs * dof->dofh_secsize); 7685 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 7686 } 7687 7688 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 7689 7690 /* 7691 * Create the provider. 7692 */ 7693 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7694 7695 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 7696 return; 7697 7698 meta->dtm_count++; 7699 7700 /* 7701 * Create the probes. 
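 *
 * (Illustrative note: each dof_probe_t record below is translated
 * into a dtrace_helper_probedesc_t and handed to the meta provider's
 * dtms_create_probe() entry point; this is how, for example, USDT
 * probe sites described in a process's DOF typically become
 * pid-provider probes.)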
7702 */ 7703 for (i = 0; i < nprobes; i++) { 7704 probe = (dof_probe_t *)(uintptr_t)(daddr + 7705 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 7706 7707 dhpb.dthpb_mod = dhp->dofhp_mod; 7708 dhpb.dthpb_func = strtab + probe->dofpr_func; 7709 dhpb.dthpb_name = strtab + probe->dofpr_name; 7710 dhpb.dthpb_base = probe->dofpr_addr; 7711 dhpb.dthpb_offs = off + probe->dofpr_offidx; 7712 dhpb.dthpb_noffs = probe->dofpr_noffs; 7713 if (enoff != NULL) { 7714 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 7715 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 7716 } else { 7717 dhpb.dthpb_enoffs = NULL; 7718 dhpb.dthpb_nenoffs = 0; 7719 } 7720 dhpb.dthpb_args = arg + probe->dofpr_argidx; 7721 dhpb.dthpb_nargc = probe->dofpr_nargc; 7722 dhpb.dthpb_xargc = probe->dofpr_xargc; 7723 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 7724 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 7725 7726 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 7727 } 7728 } 7729 7730 static void 7731 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 7732 { 7733 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7734 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7735 int i; 7736 7737 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7738 7739 for (i = 0; i < dof->dofh_secnum; i++) { 7740 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7741 dof->dofh_secoff + i * dof->dofh_secsize); 7742 7743 if (sec->dofs_type != DOF_SECT_PROVIDER) 7744 continue; 7745 7746 dtrace_helper_provide_one(dhp, sec, pid); 7747 } 7748 7749 /* 7750 * We may have just created probes, so we must now rematch against 7751 * any retained enablings. Note that this call will acquire both 7752 * cpu_lock and dtrace_lock; the fact that we are holding 7753 * dtrace_meta_lock now is what defines the ordering with respect to 7754 * these three locks. 7755 */ 7756 dtrace_enabling_matchall(); 7757 } 7758 7759 static void 7760 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7761 { 7762 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7763 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7764 dof_sec_t *str_sec; 7765 dof_provider_t *provider; 7766 char *strtab; 7767 dtrace_helper_provdesc_t dhpv; 7768 dtrace_meta_t *meta = dtrace_meta_pid; 7769 dtrace_mops_t *mops = &meta->dtm_mops; 7770 7771 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7772 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7773 provider->dofpv_strtab * dof->dofh_secsize); 7774 7775 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7776 7777 /* 7778 * Create the provider. 7779 */ 7780 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7781 7782 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 7783 7784 meta->dtm_count--; 7785 } 7786 7787 static void 7788 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 7789 { 7790 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7791 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7792 int i; 7793 7794 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7795 7796 for (i = 0; i < dof->dofh_secnum; i++) { 7797 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7798 dof->dofh_secoff + i * dof->dofh_secsize); 7799 7800 if (sec->dofs_type != DOF_SECT_PROVIDER) 7801 continue; 7802 7803 dtrace_helper_provider_remove_one(dhp, sec, pid); 7804 } 7805 } 7806 7807 /* 7808 * DTrace Meta Provider-to-Framework API Functions 7809 * 7810 * These functions implement the Meta Provider-to-Framework API, as described 7811 * in <sys/dtrace.h>. 
7812 */ 7813 int 7814 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 7815 dtrace_meta_provider_id_t *idp) 7816 { 7817 dtrace_meta_t *meta; 7818 dtrace_helpers_t *help, *next; 7819 int i; 7820 7821 *idp = DTRACE_METAPROVNONE; 7822 7823 /* 7824 * We strictly don't need the name, but we hold onto it for 7825 * debuggability. All hail error queues! 7826 */ 7827 if (name == NULL) { 7828 cmn_err(CE_WARN, "failed to register meta-provider: " 7829 "invalid name"); 7830 return (EINVAL); 7831 } 7832 7833 if (mops == NULL || 7834 mops->dtms_create_probe == NULL || 7835 mops->dtms_provide_pid == NULL || 7836 mops->dtms_remove_pid == NULL) { 7837 cmn_err(CE_WARN, "failed to register meta-register %s: " 7838 "invalid ops", name); 7839 return (EINVAL); 7840 } 7841 7842 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 7843 meta->dtm_mops = *mops; 7844 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7845 (void) strcpy(meta->dtm_name, name); 7846 meta->dtm_arg = arg; 7847 7848 mutex_enter(&dtrace_meta_lock); 7849 mutex_enter(&dtrace_lock); 7850 7851 if (dtrace_meta_pid != NULL) { 7852 mutex_exit(&dtrace_lock); 7853 mutex_exit(&dtrace_meta_lock); 7854 cmn_err(CE_WARN, "failed to register meta-register %s: " 7855 "user-land meta-provider exists", name); 7856 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 7857 kmem_free(meta, sizeof (dtrace_meta_t)); 7858 return (EINVAL); 7859 } 7860 7861 dtrace_meta_pid = meta; 7862 *idp = (dtrace_meta_provider_id_t)meta; 7863 7864 /* 7865 * If there are providers and probes ready to go, pass them 7866 * off to the new meta provider now. 7867 */ 7868 7869 help = dtrace_deferred_pid; 7870 dtrace_deferred_pid = NULL; 7871 7872 mutex_exit(&dtrace_lock); 7873 7874 while (help != NULL) { 7875 for (i = 0; i < help->dthps_nprovs; i++) { 7876 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 7877 help->dthps_pid); 7878 } 7879 7880 next = help->dthps_next; 7881 help->dthps_next = NULL; 7882 help->dthps_prev = NULL; 7883 help->dthps_deferred = 0; 7884 help = next; 7885 } 7886 7887 mutex_exit(&dtrace_meta_lock); 7888 7889 return (0); 7890 } 7891 7892 int 7893 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 7894 { 7895 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 7896 7897 mutex_enter(&dtrace_meta_lock); 7898 mutex_enter(&dtrace_lock); 7899 7900 if (old == dtrace_meta_pid) { 7901 pp = &dtrace_meta_pid; 7902 } else { 7903 panic("attempt to unregister non-existent " 7904 "dtrace meta-provider %p\n", (void *)old); 7905 } 7906 7907 if (old->dtm_count != 0) { 7908 mutex_exit(&dtrace_lock); 7909 mutex_exit(&dtrace_meta_lock); 7910 return (EBUSY); 7911 } 7912 7913 *pp = NULL; 7914 7915 mutex_exit(&dtrace_lock); 7916 mutex_exit(&dtrace_meta_lock); 7917 7918 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 7919 kmem_free(old, sizeof (dtrace_meta_t)); 7920 7921 return (0); 7922 } 7923 7924 7925 /* 7926 * DTrace DIF Object Functions 7927 */ 7928 static int 7929 dtrace_difo_err(uint_t pc, const char *format, ...) 7930 { 7931 if (dtrace_err_verbose) { 7932 va_list alist; 7933 7934 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 7935 va_start(alist, format); 7936 (void) vuprintf(format, alist); 7937 va_end(alist); 7938 } 7939 7940 #ifdef DTRACE_ERRDEBUG 7941 dtrace_errdebug(format); 7942 #endif 7943 return (1); 7944 } 7945 7946 /* 7947 * Validate a DTrace DIF object by checking the IR instructions. The following 7948 * rules are currently enforced by dtrace_difo_validate(): 7949 * 7950 * 1. 
Each instruction must have a valid opcode 7951 * 2. Each register, string, variable, or subroutine reference must be valid 7952 * 3. No instruction can modify register %r0 (must be zero) 7953 * 4. All instruction reserved bits must be set to zero 7954 * 5. The last instruction must be a "ret" instruction 7955 * 6. All branch targets must reference a valid instruction _after_ the branch 7956 */ 7957 static int 7958 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7959 cred_t *cr) 7960 { 7961 int err = 0, i; 7962 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 7963 int kcheckload; 7964 uint_t pc; 7965 7966 kcheckload = cr == NULL || 7967 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 7968 7969 dp->dtdo_destructive = 0; 7970 7971 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 7972 dif_instr_t instr = dp->dtdo_buf[pc]; 7973 7974 uint_t r1 = DIF_INSTR_R1(instr); 7975 uint_t r2 = DIF_INSTR_R2(instr); 7976 uint_t rd = DIF_INSTR_RD(instr); 7977 uint_t rs = DIF_INSTR_RS(instr); 7978 uint_t label = DIF_INSTR_LABEL(instr); 7979 uint_t v = DIF_INSTR_VAR(instr); 7980 uint_t subr = DIF_INSTR_SUBR(instr); 7981 uint_t type = DIF_INSTR_TYPE(instr); 7982 uint_t op = DIF_INSTR_OP(instr); 7983 7984 switch (op) { 7985 case DIF_OP_OR: 7986 case DIF_OP_XOR: 7987 case DIF_OP_AND: 7988 case DIF_OP_SLL: 7989 case DIF_OP_SRL: 7990 case DIF_OP_SRA: 7991 case DIF_OP_SUB: 7992 case DIF_OP_ADD: 7993 case DIF_OP_MUL: 7994 case DIF_OP_SDIV: 7995 case DIF_OP_UDIV: 7996 case DIF_OP_SREM: 7997 case DIF_OP_UREM: 7998 case DIF_OP_COPYS: 7999 if (r1 >= nregs) 8000 err += efunc(pc, "invalid register %u\n", r1); 8001 if (r2 >= nregs) 8002 err += efunc(pc, "invalid register %u\n", r2); 8003 if (rd >= nregs) 8004 err += efunc(pc, "invalid register %u\n", rd); 8005 if (rd == 0) 8006 err += efunc(pc, "cannot write to %r0\n"); 8007 break; 8008 case DIF_OP_NOT: 8009 case DIF_OP_MOV: 8010 case DIF_OP_ALLOCS: 8011 if (r1 >= nregs) 8012 err += efunc(pc, "invalid register %u\n", r1); 8013 if (r2 != 0) 8014 err += efunc(pc, "non-zero reserved bits\n"); 8015 if (rd >= nregs) 8016 err += efunc(pc, "invalid register %u\n", rd); 8017 if (rd == 0) 8018 err += efunc(pc, "cannot write to %r0\n"); 8019 break; 8020 case DIF_OP_LDSB: 8021 case DIF_OP_LDSH: 8022 case DIF_OP_LDSW: 8023 case DIF_OP_LDUB: 8024 case DIF_OP_LDUH: 8025 case DIF_OP_LDUW: 8026 case DIF_OP_LDX: 8027 if (r1 >= nregs) 8028 err += efunc(pc, "invalid register %u\n", r1); 8029 if (r2 != 0) 8030 err += efunc(pc, "non-zero reserved bits\n"); 8031 if (rd >= nregs) 8032 err += efunc(pc, "invalid register %u\n", rd); 8033 if (rd == 0) 8034 err += efunc(pc, "cannot write to %r0\n"); 8035 if (kcheckload) 8036 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8037 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8038 break; 8039 case DIF_OP_RLDSB: 8040 case DIF_OP_RLDSH: 8041 case DIF_OP_RLDSW: 8042 case DIF_OP_RLDUB: 8043 case DIF_OP_RLDUH: 8044 case DIF_OP_RLDUW: 8045 case DIF_OP_RLDX: 8046 if (r1 >= nregs) 8047 err += efunc(pc, "invalid register %u\n", r1); 8048 if (r2 != 0) 8049 err += efunc(pc, "non-zero reserved bits\n"); 8050 if (rd >= nregs) 8051 err += efunc(pc, "invalid register %u\n", rd); 8052 if (rd == 0) 8053 err += efunc(pc, "cannot write to %r0\n"); 8054 break; 8055 case DIF_OP_ULDSB: 8056 case DIF_OP_ULDSH: 8057 case DIF_OP_ULDSW: 8058 case DIF_OP_ULDUB: 8059 case DIF_OP_ULDUH: 8060 case DIF_OP_ULDUW: 8061 case DIF_OP_ULDX: 8062 if (r1 >= nregs) 8063 err += efunc(pc, "invalid register %u\n", r1); 8064 if (r2 != 0) 8065 err += 
efunc(pc, "non-zero reserved bits\n"); 8066 if (rd >= nregs) 8067 err += efunc(pc, "invalid register %u\n", rd); 8068 if (rd == 0) 8069 err += efunc(pc, "cannot write to %r0\n"); 8070 break; 8071 case DIF_OP_STB: 8072 case DIF_OP_STH: 8073 case DIF_OP_STW: 8074 case DIF_OP_STX: 8075 if (r1 >= nregs) 8076 err += efunc(pc, "invalid register %u\n", r1); 8077 if (r2 != 0) 8078 err += efunc(pc, "non-zero reserved bits\n"); 8079 if (rd >= nregs) 8080 err += efunc(pc, "invalid register %u\n", rd); 8081 if (rd == 0) 8082 err += efunc(pc, "cannot write to 0 address\n"); 8083 break; 8084 case DIF_OP_CMP: 8085 case DIF_OP_SCMP: 8086 if (r1 >= nregs) 8087 err += efunc(pc, "invalid register %u\n", r1); 8088 if (r2 >= nregs) 8089 err += efunc(pc, "invalid register %u\n", r2); 8090 if (rd != 0) 8091 err += efunc(pc, "non-zero reserved bits\n"); 8092 break; 8093 case DIF_OP_TST: 8094 if (r1 >= nregs) 8095 err += efunc(pc, "invalid register %u\n", r1); 8096 if (r2 != 0 || rd != 0) 8097 err += efunc(pc, "non-zero reserved bits\n"); 8098 break; 8099 case DIF_OP_BA: 8100 case DIF_OP_BE: 8101 case DIF_OP_BNE: 8102 case DIF_OP_BG: 8103 case DIF_OP_BGU: 8104 case DIF_OP_BGE: 8105 case DIF_OP_BGEU: 8106 case DIF_OP_BL: 8107 case DIF_OP_BLU: 8108 case DIF_OP_BLE: 8109 case DIF_OP_BLEU: 8110 if (label >= dp->dtdo_len) { 8111 err += efunc(pc, "invalid branch target %u\n", 8112 label); 8113 } 8114 if (label <= pc) { 8115 err += efunc(pc, "backward branch to %u\n", 8116 label); 8117 } 8118 break; 8119 case DIF_OP_RET: 8120 if (r1 != 0 || r2 != 0) 8121 err += efunc(pc, "non-zero reserved bits\n"); 8122 if (rd >= nregs) 8123 err += efunc(pc, "invalid register %u\n", rd); 8124 break; 8125 case DIF_OP_NOP: 8126 case DIF_OP_POPTS: 8127 case DIF_OP_FLUSHTS: 8128 if (r1 != 0 || r2 != 0 || rd != 0) 8129 err += efunc(pc, "non-zero reserved bits\n"); 8130 break; 8131 case DIF_OP_SETX: 8132 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8133 err += efunc(pc, "invalid integer ref %u\n", 8134 DIF_INSTR_INTEGER(instr)); 8135 } 8136 if (rd >= nregs) 8137 err += efunc(pc, "invalid register %u\n", rd); 8138 if (rd == 0) 8139 err += efunc(pc, "cannot write to %r0\n"); 8140 break; 8141 case DIF_OP_SETS: 8142 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8143 err += efunc(pc, "invalid string ref %u\n", 8144 DIF_INSTR_STRING(instr)); 8145 } 8146 if (rd >= nregs) 8147 err += efunc(pc, "invalid register %u\n", rd); 8148 if (rd == 0) 8149 err += efunc(pc, "cannot write to %r0\n"); 8150 break; 8151 case DIF_OP_LDGA: 8152 case DIF_OP_LDTA: 8153 if (r1 > DIF_VAR_ARRAY_MAX) 8154 err += efunc(pc, "invalid array %u\n", r1); 8155 if (r2 >= nregs) 8156 err += efunc(pc, "invalid register %u\n", r2); 8157 if (rd >= nregs) 8158 err += efunc(pc, "invalid register %u\n", rd); 8159 if (rd == 0) 8160 err += efunc(pc, "cannot write to %r0\n"); 8161 break; 8162 case DIF_OP_LDGS: 8163 case DIF_OP_LDTS: 8164 case DIF_OP_LDLS: 8165 case DIF_OP_LDGAA: 8166 case DIF_OP_LDTAA: 8167 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8168 err += efunc(pc, "invalid variable %u\n", v); 8169 if (rd >= nregs) 8170 err += efunc(pc, "invalid register %u\n", rd); 8171 if (rd == 0) 8172 err += efunc(pc, "cannot write to %r0\n"); 8173 break; 8174 case DIF_OP_STGS: 8175 case DIF_OP_STTS: 8176 case DIF_OP_STLS: 8177 case DIF_OP_STGAA: 8178 case DIF_OP_STTAA: 8179 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8180 err += efunc(pc, "invalid variable %u\n", v); 8181 if (rs >= nregs) 8182 err += efunc(pc, "invalid register %u\n", rd); 8183 break; 8184 case 
DIF_OP_CALL: 8185 if (subr > DIF_SUBR_MAX) 8186 err += efunc(pc, "invalid subr %u\n", subr); 8187 if (rd >= nregs) 8188 err += efunc(pc, "invalid register %u\n", rd); 8189 if (rd == 0) 8190 err += efunc(pc, "cannot write to %r0\n"); 8191 8192 if (subr == DIF_SUBR_COPYOUT || 8193 subr == DIF_SUBR_COPYOUTSTR) { 8194 dp->dtdo_destructive = 1; 8195 } 8196 break; 8197 case DIF_OP_PUSHTR: 8198 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8199 err += efunc(pc, "invalid ref type %u\n", type); 8200 if (r2 >= nregs) 8201 err += efunc(pc, "invalid register %u\n", r2); 8202 if (rs >= nregs) 8203 err += efunc(pc, "invalid register %u\n", rs); 8204 break; 8205 case DIF_OP_PUSHTV: 8206 if (type != DIF_TYPE_CTF) 8207 err += efunc(pc, "invalid val type %u\n", type); 8208 if (r2 >= nregs) 8209 err += efunc(pc, "invalid register %u\n", r2); 8210 if (rs >= nregs) 8211 err += efunc(pc, "invalid register %u\n", rs); 8212 break; 8213 default: 8214 err += efunc(pc, "invalid opcode %u\n", 8215 DIF_INSTR_OP(instr)); 8216 } 8217 } 8218 8219 if (dp->dtdo_len != 0 && 8220 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8221 err += efunc(dp->dtdo_len - 1, 8222 "expected 'ret' as last DIF instruction\n"); 8223 } 8224 8225 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8226 /* 8227 * If we're not returning by reference, the size must be either 8228 * 0 or the size of one of the base types. 8229 */ 8230 switch (dp->dtdo_rtype.dtdt_size) { 8231 case 0: 8232 case sizeof (uint8_t): 8233 case sizeof (uint16_t): 8234 case sizeof (uint32_t): 8235 case sizeof (uint64_t): 8236 break; 8237 8238 default: 8239 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 8240 } 8241 } 8242 8243 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8244 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8245 dtrace_diftype_t *vt, *et; 8246 uint_t id, ndx; 8247 8248 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8249 v->dtdv_scope != DIFV_SCOPE_THREAD && 8250 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8251 err += efunc(i, "unrecognized variable scope %d\n", 8252 v->dtdv_scope); 8253 break; 8254 } 8255 8256 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8257 v->dtdv_kind != DIFV_KIND_SCALAR) { 8258 err += efunc(i, "unrecognized variable type %d\n", 8259 v->dtdv_kind); 8260 break; 8261 } 8262 8263 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8264 err += efunc(i, "%d exceeds variable id limit\n", id); 8265 break; 8266 } 8267 8268 if (id < DIF_VAR_OTHER_UBASE) 8269 continue; 8270 8271 /* 8272 * For user-defined variables, we need to check that this 8273 * definition is identical to any previous definition that we 8274 * encountered. 
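 *
 * For illustration (a hypothetical pair of DIFOs, not taken from any
 * particular consumer): if one DIFO defines a user variable id at or
 * above DIF_VAR_OTHER_UBASE as DIFV_KIND_SCALAR with a dtdt_size of
 * sizeof (uint64_t), a later DIFO presenting the same id and scope as
 * DIFV_KIND_ARRAY, or with DIF_TF_BYREF set, or with a different
 * non-zero size, is rejected below with the corresponding "changed
 * variable ..." error.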
8275 */ 8276 ndx = id - DIF_VAR_OTHER_UBASE; 8277 8278 switch (v->dtdv_scope) { 8279 case DIFV_SCOPE_GLOBAL: 8280 if (ndx < vstate->dtvs_nglobals) { 8281 dtrace_statvar_t *svar; 8282 8283 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8284 existing = &svar->dtsv_var; 8285 } 8286 8287 break; 8288 8289 case DIFV_SCOPE_THREAD: 8290 if (ndx < vstate->dtvs_ntlocals) 8291 existing = &vstate->dtvs_tlocals[ndx]; 8292 break; 8293 8294 case DIFV_SCOPE_LOCAL: 8295 if (ndx < vstate->dtvs_nlocals) { 8296 dtrace_statvar_t *svar; 8297 8298 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8299 existing = &svar->dtsv_var; 8300 } 8301 8302 break; 8303 } 8304 8305 vt = &v->dtdv_type; 8306 8307 if (vt->dtdt_flags & DIF_TF_BYREF) { 8308 if (vt->dtdt_size == 0) { 8309 err += efunc(i, "zero-sized variable\n"); 8310 break; 8311 } 8312 8313 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8314 vt->dtdt_size > dtrace_global_maxsize) { 8315 err += efunc(i, "oversized by-ref global\n"); 8316 break; 8317 } 8318 } 8319 8320 if (existing == NULL || existing->dtdv_id == 0) 8321 continue; 8322 8323 ASSERT(existing->dtdv_id == v->dtdv_id); 8324 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8325 8326 if (existing->dtdv_kind != v->dtdv_kind) 8327 err += efunc(i, "%d changed variable kind\n", id); 8328 8329 et = &existing->dtdv_type; 8330 8331 if (vt->dtdt_flags != et->dtdt_flags) { 8332 err += efunc(i, "%d changed variable type flags\n", id); 8333 break; 8334 } 8335 8336 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8337 err += efunc(i, "%d changed variable type size\n", id); 8338 break; 8339 } 8340 } 8341 8342 return (err); 8343 } 8344 8345 /* 8346 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8347 * are much more constrained than normal DIFOs. Specifically, they may 8348 * not: 8349 * 8350 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8351 * miscellaneous string routines 8352 * 2. Access DTrace variables other than the args[] array, and the 8353 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8354 * 3. Have thread-local variables. 8355 * 4. Have dynamic variables. 8356 */ 8357 static int 8358 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8359 { 8360 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8361 int err = 0; 8362 uint_t pc; 8363 8364 for (pc = 0; pc < dp->dtdo_len; pc++) { 8365 dif_instr_t instr = dp->dtdo_buf[pc]; 8366 8367 uint_t v = DIF_INSTR_VAR(instr); 8368 uint_t subr = DIF_INSTR_SUBR(instr); 8369 uint_t op = DIF_INSTR_OP(instr); 8370 8371 switch (op) { 8372 case DIF_OP_OR: 8373 case DIF_OP_XOR: 8374 case DIF_OP_AND: 8375 case DIF_OP_SLL: 8376 case DIF_OP_SRL: 8377 case DIF_OP_SRA: 8378 case DIF_OP_SUB: 8379 case DIF_OP_ADD: 8380 case DIF_OP_MUL: 8381 case DIF_OP_SDIV: 8382 case DIF_OP_UDIV: 8383 case DIF_OP_SREM: 8384 case DIF_OP_UREM: 8385 case DIF_OP_COPYS: 8386 case DIF_OP_NOT: 8387 case DIF_OP_MOV: 8388 case DIF_OP_RLDSB: 8389 case DIF_OP_RLDSH: 8390 case DIF_OP_RLDSW: 8391 case DIF_OP_RLDUB: 8392 case DIF_OP_RLDUH: 8393 case DIF_OP_RLDUW: 8394 case DIF_OP_RLDX: 8395 case DIF_OP_ULDSB: 8396 case DIF_OP_ULDSH: 8397 case DIF_OP_ULDSW: 8398 case DIF_OP_ULDUB: 8399 case DIF_OP_ULDUH: 8400 case DIF_OP_ULDUW: 8401 case DIF_OP_ULDX: 8402 case DIF_OP_STB: 8403 case DIF_OP_STH: 8404 case DIF_OP_STW: 8405 case DIF_OP_STX: 8406 case DIF_OP_ALLOCS: 8407 case DIF_OP_CMP: 8408 case DIF_OP_SCMP: 8409 case DIF_OP_TST: 8410 case DIF_OP_BA: 8411 case DIF_OP_BE: 8412 case DIF_OP_BNE: 8413 case DIF_OP_BG: 8414 case DIF_OP_BGU: 8415 case DIF_OP_BGE: 8416 case DIF_OP_BGEU: 8417 case DIF_OP_BL: 8418 case DIF_OP_BLU: 8419 case DIF_OP_BLE: 8420 case DIF_OP_BLEU: 8421 case DIF_OP_RET: 8422 case DIF_OP_NOP: 8423 case DIF_OP_POPTS: 8424 case DIF_OP_FLUSHTS: 8425 case DIF_OP_SETX: 8426 case DIF_OP_SETS: 8427 case DIF_OP_LDGA: 8428 case DIF_OP_LDLS: 8429 case DIF_OP_STGS: 8430 case DIF_OP_STLS: 8431 case DIF_OP_PUSHTR: 8432 case DIF_OP_PUSHTV: 8433 break; 8434 8435 case DIF_OP_LDGS: 8436 if (v >= DIF_VAR_OTHER_UBASE) 8437 break; 8438 8439 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8440 break; 8441 8442 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8443 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8444 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8445 v == DIF_VAR_UID || v == DIF_VAR_GID) 8446 break; 8447 8448 err += efunc(pc, "illegal variable %u\n", v); 8449 break; 8450 8451 case DIF_OP_LDTA: 8452 case DIF_OP_LDTS: 8453 case DIF_OP_LDGAA: 8454 case DIF_OP_LDTAA: 8455 err += efunc(pc, "illegal dynamic variable load\n"); 8456 break; 8457 8458 case DIF_OP_STTS: 8459 case DIF_OP_STGAA: 8460 case DIF_OP_STTAA: 8461 err += efunc(pc, "illegal dynamic variable store\n"); 8462 break; 8463 8464 case DIF_OP_CALL: 8465 if (subr == DIF_SUBR_ALLOCA || 8466 subr == DIF_SUBR_BCOPY || 8467 subr == DIF_SUBR_COPYIN || 8468 subr == DIF_SUBR_COPYINTO || 8469 subr == DIF_SUBR_COPYINSTR || 8470 subr == DIF_SUBR_INDEX || 8471 subr == DIF_SUBR_INET_NTOA || 8472 subr == DIF_SUBR_INET_NTOA6 || 8473 subr == DIF_SUBR_INET_NTOP || 8474 subr == DIF_SUBR_LLTOSTR || 8475 subr == DIF_SUBR_RINDEX || 8476 subr == DIF_SUBR_STRCHR || 8477 subr == DIF_SUBR_STRJOIN || 8478 subr == DIF_SUBR_STRRCHR || 8479 subr == DIF_SUBR_STRSTR || 8480 subr == DIF_SUBR_HTONS || 8481 subr == DIF_SUBR_HTONL || 8482 subr == DIF_SUBR_HTONLL || 8483 subr == DIF_SUBR_NTOHS || 8484 subr == DIF_SUBR_NTOHL || 8485 subr == DIF_SUBR_NTOHLL) 8486 break; 8487 8488 err += efunc(pc, "invalid subr %u\n", subr); 8489 break; 8490 8491 default: 8492 err += efunc(pc, "invalid opcode %u\n", 8493 DIF_INSTR_OP(instr)); 8494 } 8495 } 8496 8497 return (err); 8498 } 8499 8500 /* 8501 * Returns 1 if the expression in the DIF object can be cached on a per-thread 8502 * basis; 0 if not. 
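 *
 * For example (the exact DIF emitted is of course up to the compiler):
 * a predicate such as /pid == 1234/ only reads DIF_VAR_PID and compares
 * it against a constant, so its result cannot change for the lifetime
 * of a thread and may be cached.  A predicate that loads memory or
 * refers to a variable outside the small list below must be
 * re-evaluated on every firing and is not cacheable.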
8503 */ 8504 static int 8505 dtrace_difo_cacheable(dtrace_difo_t *dp) 8506 { 8507 int i; 8508 8509 if (dp == NULL) 8510 return (0); 8511 8512 for (i = 0; i < dp->dtdo_varlen; i++) { 8513 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8514 8515 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8516 continue; 8517 8518 switch (v->dtdv_id) { 8519 case DIF_VAR_CURTHREAD: 8520 case DIF_VAR_PID: 8521 case DIF_VAR_TID: 8522 case DIF_VAR_EXECNAME: 8523 case DIF_VAR_ZONENAME: 8524 break; 8525 8526 default: 8527 return (0); 8528 } 8529 } 8530 8531 /* 8532 * This DIF object may be cacheable. Now we need to look for any 8533 * array loading instructions, any memory loading instructions, or 8534 * any stores to thread-local variables. 8535 */ 8536 for (i = 0; i < dp->dtdo_len; i++) { 8537 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8538 8539 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8540 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8541 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8542 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8543 return (0); 8544 } 8545 8546 return (1); 8547 } 8548 8549 static void 8550 dtrace_difo_hold(dtrace_difo_t *dp) 8551 { 8552 int i; 8553 8554 ASSERT(MUTEX_HELD(&dtrace_lock)); 8555 8556 dp->dtdo_refcnt++; 8557 ASSERT(dp->dtdo_refcnt != 0); 8558 8559 /* 8560 * We need to check this DIF object for references to the variable 8561 * DIF_VAR_VTIMESTAMP. 8562 */ 8563 for (i = 0; i < dp->dtdo_varlen; i++) { 8564 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8565 8566 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8567 continue; 8568 8569 if (dtrace_vtime_references++ == 0) 8570 dtrace_vtime_enable(); 8571 } 8572 } 8573 8574 /* 8575 * This routine calculates the dynamic variable chunksize for a given DIF 8576 * object. The calculation is not fool-proof, and can probably be tricked by 8577 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8578 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8579 * if a dynamic variable size exceeds the chunksize. 
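 *
 * As a sketch of the arithmetic performed below (the exact structure
 * sizes are implementation details): each dynamic variable store is
 * sized as
 *
 *	sizeof (dtrace_dynvar_t)			(variable header)
 *	    + (nkeys - 1) * sizeof (dtrace_key_t)	(additional tuple keys)
 *	    + ksize					(rounded-up key storage)
 *	    + dtdv_type.dtdt_size			(the stored data)
 *
 * rounded up to a multiple of sizeof (uint64_t); the largest value seen
 * for any store in the DIFO becomes the chunksize for the vstate's
 * dynamic variable state.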
8580 */ 8581 static void 8582 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8583 { 8584 uint64_t sval; 8585 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8586 const dif_instr_t *text = dp->dtdo_buf; 8587 uint_t pc, srd = 0; 8588 uint_t ttop = 0; 8589 size_t size, ksize; 8590 uint_t id, i; 8591 8592 for (pc = 0; pc < dp->dtdo_len; pc++) { 8593 dif_instr_t instr = text[pc]; 8594 uint_t op = DIF_INSTR_OP(instr); 8595 uint_t rd = DIF_INSTR_RD(instr); 8596 uint_t r1 = DIF_INSTR_R1(instr); 8597 uint_t nkeys = 0; 8598 uchar_t scope; 8599 8600 dtrace_key_t *key = tupregs; 8601 8602 switch (op) { 8603 case DIF_OP_SETX: 8604 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8605 srd = rd; 8606 continue; 8607 8608 case DIF_OP_STTS: 8609 key = &tupregs[DIF_DTR_NREGS]; 8610 key[0].dttk_size = 0; 8611 key[1].dttk_size = 0; 8612 nkeys = 2; 8613 scope = DIFV_SCOPE_THREAD; 8614 break; 8615 8616 case DIF_OP_STGAA: 8617 case DIF_OP_STTAA: 8618 nkeys = ttop; 8619 8620 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8621 key[nkeys++].dttk_size = 0; 8622 8623 key[nkeys++].dttk_size = 0; 8624 8625 if (op == DIF_OP_STTAA) { 8626 scope = DIFV_SCOPE_THREAD; 8627 } else { 8628 scope = DIFV_SCOPE_GLOBAL; 8629 } 8630 8631 break; 8632 8633 case DIF_OP_PUSHTR: 8634 if (ttop == DIF_DTR_NREGS) 8635 return; 8636 8637 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8638 /* 8639 * If the register for the size of the "pushtr" 8640 * is %r0 (or the value is 0) and the type is 8641 * a string, we'll use the system-wide default 8642 * string size. 8643 */ 8644 tupregs[ttop++].dttk_size = 8645 dtrace_strsize_default; 8646 } else { 8647 if (srd == 0) 8648 return; 8649 8650 tupregs[ttop++].dttk_size = sval; 8651 } 8652 8653 break; 8654 8655 case DIF_OP_PUSHTV: 8656 if (ttop == DIF_DTR_NREGS) 8657 return; 8658 8659 tupregs[ttop++].dttk_size = 0; 8660 break; 8661 8662 case DIF_OP_FLUSHTS: 8663 ttop = 0; 8664 break; 8665 8666 case DIF_OP_POPTS: 8667 if (ttop != 0) 8668 ttop--; 8669 break; 8670 } 8671 8672 sval = 0; 8673 srd = 0; 8674 8675 if (nkeys == 0) 8676 continue; 8677 8678 /* 8679 * We have a dynamic variable allocation; calculate its size. 8680 */ 8681 for (ksize = 0, i = 0; i < nkeys; i++) 8682 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 8683 8684 size = sizeof (dtrace_dynvar_t); 8685 size += sizeof (dtrace_key_t) * (nkeys - 1); 8686 size += ksize; 8687 8688 /* 8689 * Now we need to determine the size of the stored data. 8690 */ 8691 id = DIF_INSTR_VAR(instr); 8692 8693 for (i = 0; i < dp->dtdo_varlen; i++) { 8694 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8695 8696 if (v->dtdv_id == id && v->dtdv_scope == scope) { 8697 size += v->dtdv_type.dtdt_size; 8698 break; 8699 } 8700 } 8701 8702 if (i == dp->dtdo_varlen) 8703 return; 8704 8705 /* 8706 * We have the size. If this is larger than the chunk size 8707 * for our dynamic variable state, reset the chunk size. 
8708 */ 8709 size = P2ROUNDUP(size, sizeof (uint64_t)); 8710 8711 if (size > vstate->dtvs_dynvars.dtds_chunksize) 8712 vstate->dtvs_dynvars.dtds_chunksize = size; 8713 } 8714 } 8715 8716 static void 8717 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8718 { 8719 int i, oldsvars, osz, nsz, otlocals, ntlocals; 8720 uint_t id; 8721 8722 ASSERT(MUTEX_HELD(&dtrace_lock)); 8723 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 8724 8725 for (i = 0; i < dp->dtdo_varlen; i++) { 8726 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8727 dtrace_statvar_t *svar, ***svarp; 8728 size_t dsize = 0; 8729 uint8_t scope = v->dtdv_scope; 8730 int *np; 8731 8732 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8733 continue; 8734 8735 id -= DIF_VAR_OTHER_UBASE; 8736 8737 switch (scope) { 8738 case DIFV_SCOPE_THREAD: 8739 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 8740 dtrace_difv_t *tlocals; 8741 8742 if ((ntlocals = (otlocals << 1)) == 0) 8743 ntlocals = 1; 8744 8745 osz = otlocals * sizeof (dtrace_difv_t); 8746 nsz = ntlocals * sizeof (dtrace_difv_t); 8747 8748 tlocals = kmem_zalloc(nsz, KM_SLEEP); 8749 8750 if (osz != 0) { 8751 bcopy(vstate->dtvs_tlocals, 8752 tlocals, osz); 8753 kmem_free(vstate->dtvs_tlocals, osz); 8754 } 8755 8756 vstate->dtvs_tlocals = tlocals; 8757 vstate->dtvs_ntlocals = ntlocals; 8758 } 8759 8760 vstate->dtvs_tlocals[id] = *v; 8761 continue; 8762 8763 case DIFV_SCOPE_LOCAL: 8764 np = &vstate->dtvs_nlocals; 8765 svarp = &vstate->dtvs_locals; 8766 8767 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8768 dsize = NCPU * (v->dtdv_type.dtdt_size + 8769 sizeof (uint64_t)); 8770 else 8771 dsize = NCPU * sizeof (uint64_t); 8772 8773 break; 8774 8775 case DIFV_SCOPE_GLOBAL: 8776 np = &vstate->dtvs_nglobals; 8777 svarp = &vstate->dtvs_globals; 8778 8779 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8780 dsize = v->dtdv_type.dtdt_size + 8781 sizeof (uint64_t); 8782 8783 break; 8784 8785 default: 8786 ASSERT(0); 8787 } 8788 8789 while (id >= (oldsvars = *np)) { 8790 dtrace_statvar_t **statics; 8791 int newsvars, oldsize, newsize; 8792 8793 if ((newsvars = (oldsvars << 1)) == 0) 8794 newsvars = 1; 8795 8796 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 8797 newsize = newsvars * sizeof (dtrace_statvar_t *); 8798 8799 statics = kmem_zalloc(newsize, KM_SLEEP); 8800 8801 if (oldsize != 0) { 8802 bcopy(*svarp, statics, oldsize); 8803 kmem_free(*svarp, oldsize); 8804 } 8805 8806 *svarp = statics; 8807 *np = newsvars; 8808 } 8809 8810 if ((svar = (*svarp)[id]) == NULL) { 8811 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 8812 svar->dtsv_var = *v; 8813 8814 if ((svar->dtsv_size = dsize) != 0) { 8815 svar->dtsv_data = (uint64_t)(uintptr_t) 8816 kmem_zalloc(dsize, KM_SLEEP); 8817 } 8818 8819 (*svarp)[id] = svar; 8820 } 8821 8822 svar->dtsv_refcnt++; 8823 } 8824 8825 dtrace_difo_chunksize(dp, vstate); 8826 dtrace_difo_hold(dp); 8827 } 8828 8829 static dtrace_difo_t * 8830 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8831 { 8832 dtrace_difo_t *new; 8833 size_t sz; 8834 8835 ASSERT(dp->dtdo_buf != NULL); 8836 ASSERT(dp->dtdo_refcnt != 0); 8837 8838 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 8839 8840 ASSERT(dp->dtdo_buf != NULL); 8841 sz = dp->dtdo_len * sizeof (dif_instr_t); 8842 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 8843 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 8844 new->dtdo_len = dp->dtdo_len; 8845 8846 if (dp->dtdo_strtab != NULL) { 8847 ASSERT(dp->dtdo_strlen != 0); 8848 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 8849 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 8850 new->dtdo_strlen = dp->dtdo_strlen; 8851 } 8852 8853 if (dp->dtdo_inttab != NULL) { 8854 ASSERT(dp->dtdo_intlen != 0); 8855 sz = dp->dtdo_intlen * sizeof (uint64_t); 8856 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8857 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8858 new->dtdo_intlen = dp->dtdo_intlen; 8859 } 8860 8861 if (dp->dtdo_vartab != NULL) { 8862 ASSERT(dp->dtdo_varlen != 0); 8863 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8864 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8865 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8866 new->dtdo_varlen = dp->dtdo_varlen; 8867 } 8868 8869 dtrace_difo_init(new, vstate); 8870 return (new); 8871 } 8872 8873 static void 8874 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8875 { 8876 int i; 8877 8878 ASSERT(dp->dtdo_refcnt == 0); 8879 8880 for (i = 0; i < dp->dtdo_varlen; i++) { 8881 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8882 dtrace_statvar_t *svar, **svarp; 8883 uint_t id; 8884 uint8_t scope = v->dtdv_scope; 8885 int *np; 8886 8887 switch (scope) { 8888 case DIFV_SCOPE_THREAD: 8889 continue; 8890 8891 case DIFV_SCOPE_LOCAL: 8892 np = &vstate->dtvs_nlocals; 8893 svarp = vstate->dtvs_locals; 8894 break; 8895 8896 case DIFV_SCOPE_GLOBAL: 8897 np = &vstate->dtvs_nglobals; 8898 svarp = vstate->dtvs_globals; 8899 break; 8900 8901 default: 8902 ASSERT(0); 8903 } 8904 8905 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8906 continue; 8907 8908 id -= DIF_VAR_OTHER_UBASE; 8909 ASSERT(id < *np); 8910 8911 svar = svarp[id]; 8912 ASSERT(svar != NULL); 8913 ASSERT(svar->dtsv_refcnt > 0); 8914 8915 if (--svar->dtsv_refcnt > 0) 8916 continue; 8917 8918 if (svar->dtsv_size != 0) { 8919 ASSERT(svar->dtsv_data != NULL); 8920 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8921 svar->dtsv_size); 8922 } 8923 8924 kmem_free(svar, sizeof (dtrace_statvar_t)); 8925 svarp[id] = NULL; 8926 } 8927 8928 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8929 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8930 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8931 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8932 8933 kmem_free(dp, sizeof (dtrace_difo_t)); 8934 } 8935 8936 static void 8937 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8938 { 8939 int i; 8940 8941 ASSERT(MUTEX_HELD(&dtrace_lock)); 8942 ASSERT(dp->dtdo_refcnt != 0); 8943 8944 for (i = 0; i < dp->dtdo_varlen; i++) { 8945 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8946 8947 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8948 continue; 8949 8950 ASSERT(dtrace_vtime_references > 0); 8951 if (--dtrace_vtime_references == 0) 8952 dtrace_vtime_disable(); 8953 } 8954 8955 if (--dp->dtdo_refcnt == 0) 8956 dtrace_difo_destroy(dp, vstate); 8957 } 8958 8959 /* 8960 * DTrace Format Functions 8961 */ 8962 static uint16_t 8963 dtrace_format_add(dtrace_state_t *state, char *str) 8964 { 8965 char *fmt, **new; 8966 uint16_t ndx, len = strlen(str) + 1; 8967 8968 fmt = kmem_zalloc(len, KM_SLEEP); 8969 bcopy(str, fmt, len); 8970 8971 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 8972 if (state->dts_formats[ndx] == NULL) { 8973 state->dts_formats[ndx] = fmt; 8974 return (ndx + 1); 8975 } 8976 } 8977 8978 if (state->dts_nformats == USHRT_MAX) { 8979 /* 8980 * This is only likely if a denial-of-service attack is being 8981 * attempted. As such, it's okay to fail silently here. 
8982 */ 8983 kmem_free(fmt, len); 8984 return (0); 8985 } 8986 8987 /* 8988 * For simplicity, we always resize the formats array to be exactly the 8989 * number of formats. 8990 */ 8991 ndx = state->dts_nformats++; 8992 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 8993 8994 if (state->dts_formats != NULL) { 8995 ASSERT(ndx != 0); 8996 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 8997 kmem_free(state->dts_formats, ndx * sizeof (char *)); 8998 } 8999 9000 state->dts_formats = new; 9001 state->dts_formats[ndx] = fmt; 9002 9003 return (ndx + 1); 9004 } 9005 9006 static void 9007 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9008 { 9009 char *fmt; 9010 9011 ASSERT(state->dts_formats != NULL); 9012 ASSERT(format <= state->dts_nformats); 9013 ASSERT(state->dts_formats[format - 1] != NULL); 9014 9015 fmt = state->dts_formats[format - 1]; 9016 kmem_free(fmt, strlen(fmt) + 1); 9017 state->dts_formats[format - 1] = NULL; 9018 } 9019 9020 static void 9021 dtrace_format_destroy(dtrace_state_t *state) 9022 { 9023 int i; 9024 9025 if (state->dts_nformats == 0) { 9026 ASSERT(state->dts_formats == NULL); 9027 return; 9028 } 9029 9030 ASSERT(state->dts_formats != NULL); 9031 9032 for (i = 0; i < state->dts_nformats; i++) { 9033 char *fmt = state->dts_formats[i]; 9034 9035 if (fmt == NULL) 9036 continue; 9037 9038 kmem_free(fmt, strlen(fmt) + 1); 9039 } 9040 9041 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9042 state->dts_nformats = 0; 9043 state->dts_formats = NULL; 9044 } 9045 9046 /* 9047 * DTrace Predicate Functions 9048 */ 9049 static dtrace_predicate_t * 9050 dtrace_predicate_create(dtrace_difo_t *dp) 9051 { 9052 dtrace_predicate_t *pred; 9053 9054 ASSERT(MUTEX_HELD(&dtrace_lock)); 9055 ASSERT(dp->dtdo_refcnt != 0); 9056 9057 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9058 pred->dtp_difo = dp; 9059 pred->dtp_refcnt = 1; 9060 9061 if (!dtrace_difo_cacheable(dp)) 9062 return (pred); 9063 9064 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9065 /* 9066 * This is only theoretically possible -- we have had 2^32 9067 * cacheable predicates on this machine. We cannot allow any 9068 * more predicates to become cacheable: as unlikely as it is, 9069 * there may be a thread caching a (now stale) predicate cache 9070 * ID. 
(N.B.: the temptation is being successfully resisted to 9071 * have this cmn_err() "Holy shit -- we executed this code!") 9072 */ 9073 return (pred); 9074 } 9075 9076 pred->dtp_cacheid = dtrace_predcache_id++; 9077 9078 return (pred); 9079 } 9080 9081 static void 9082 dtrace_predicate_hold(dtrace_predicate_t *pred) 9083 { 9084 ASSERT(MUTEX_HELD(&dtrace_lock)); 9085 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9086 ASSERT(pred->dtp_refcnt > 0); 9087 9088 pred->dtp_refcnt++; 9089 } 9090 9091 static void 9092 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9093 { 9094 dtrace_difo_t *dp = pred->dtp_difo; 9095 9096 ASSERT(MUTEX_HELD(&dtrace_lock)); 9097 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9098 ASSERT(pred->dtp_refcnt > 0); 9099 9100 if (--pred->dtp_refcnt == 0) { 9101 dtrace_difo_release(pred->dtp_difo, vstate); 9102 kmem_free(pred, sizeof (dtrace_predicate_t)); 9103 } 9104 } 9105 9106 /* 9107 * DTrace Action Description Functions 9108 */ 9109 static dtrace_actdesc_t * 9110 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9111 uint64_t uarg, uint64_t arg) 9112 { 9113 dtrace_actdesc_t *act; 9114 9115 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9116 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9117 9118 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9119 act->dtad_kind = kind; 9120 act->dtad_ntuple = ntuple; 9121 act->dtad_uarg = uarg; 9122 act->dtad_arg = arg; 9123 act->dtad_refcnt = 1; 9124 9125 return (act); 9126 } 9127 9128 static void 9129 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9130 { 9131 ASSERT(act->dtad_refcnt >= 1); 9132 act->dtad_refcnt++; 9133 } 9134 9135 static void 9136 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9137 { 9138 dtrace_actkind_t kind = act->dtad_kind; 9139 dtrace_difo_t *dp; 9140 9141 ASSERT(act->dtad_refcnt >= 1); 9142 9143 if (--act->dtad_refcnt != 0) 9144 return; 9145 9146 if ((dp = act->dtad_difo) != NULL) 9147 dtrace_difo_release(dp, vstate); 9148 9149 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9150 char *str = (char *)(uintptr_t)act->dtad_arg; 9151 9152 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9153 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9154 9155 if (str != NULL) 9156 kmem_free(str, strlen(str) + 1); 9157 } 9158 9159 kmem_free(act, sizeof (dtrace_actdesc_t)); 9160 } 9161 9162 /* 9163 * DTrace ECB Functions 9164 */ 9165 static dtrace_ecb_t * 9166 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9167 { 9168 dtrace_ecb_t *ecb; 9169 dtrace_epid_t epid; 9170 9171 ASSERT(MUTEX_HELD(&dtrace_lock)); 9172 9173 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9174 ecb->dte_predicate = NULL; 9175 ecb->dte_probe = probe; 9176 9177 /* 9178 * The default size is the size of the default action: recording 9179 * the epid. 
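 *
 * (Every record that this ECB stores into the principal buffer begins
 * with its EPID, which is what allows a consumer to map the record back
 * to this ECB and hence to its layout; dtrace_ecb_resize() will revisit
 * dte_size and dte_needed once actions are added.)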
9180 */ 9181 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9182 ecb->dte_alignment = sizeof (dtrace_epid_t); 9183 9184 epid = state->dts_epid++; 9185 9186 if (epid - 1 >= state->dts_necbs) { 9187 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9188 int necbs = state->dts_necbs << 1; 9189 9190 ASSERT(epid == state->dts_necbs + 1); 9191 9192 if (necbs == 0) { 9193 ASSERT(oecbs == NULL); 9194 necbs = 1; 9195 } 9196 9197 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9198 9199 if (oecbs != NULL) 9200 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9201 9202 dtrace_membar_producer(); 9203 state->dts_ecbs = ecbs; 9204 9205 if (oecbs != NULL) { 9206 /* 9207 * If this state is active, we must dtrace_sync() 9208 * before we can free the old dts_ecbs array: we're 9209 * coming in hot, and there may be active ring 9210 * buffer processing (which indexes into the dts_ecbs 9211 * array) on another CPU. 9212 */ 9213 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9214 dtrace_sync(); 9215 9216 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9217 } 9218 9219 dtrace_membar_producer(); 9220 state->dts_necbs = necbs; 9221 } 9222 9223 ecb->dte_state = state; 9224 9225 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9226 dtrace_membar_producer(); 9227 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9228 9229 return (ecb); 9230 } 9231 9232 static int 9233 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9234 { 9235 dtrace_probe_t *probe = ecb->dte_probe; 9236 9237 ASSERT(MUTEX_HELD(&cpu_lock)); 9238 ASSERT(MUTEX_HELD(&dtrace_lock)); 9239 ASSERT(ecb->dte_next == NULL); 9240 9241 if (probe == NULL) { 9242 /* 9243 * This is the NULL probe -- there's nothing to do. 9244 */ 9245 return (0); 9246 } 9247 9248 if (probe->dtpr_ecb == NULL) { 9249 dtrace_provider_t *prov = probe->dtpr_provider; 9250 9251 /* 9252 * We're the first ECB on this probe. 9253 */ 9254 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9255 9256 if (ecb->dte_predicate != NULL) 9257 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9258 9259 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9260 probe->dtpr_id, probe->dtpr_arg)); 9261 } else { 9262 /* 9263 * This probe is already active. Swing the last pointer to 9264 * point to the new ECB, and issue a dtrace_sync() to assure 9265 * that all CPUs have seen the change. 9266 */ 9267 ASSERT(probe->dtpr_ecb_last != NULL); 9268 probe->dtpr_ecb_last->dte_next = ecb; 9269 probe->dtpr_ecb_last = ecb; 9270 probe->dtpr_predcache = 0; 9271 9272 dtrace_sync(); 9273 return (0); 9274 } 9275 } 9276 9277 static void 9278 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9279 { 9280 uint32_t maxalign = sizeof (dtrace_epid_t); 9281 uint32_t align = sizeof (uint8_t), offs, diff; 9282 dtrace_action_t *act; 9283 int wastuple = 0; 9284 uint32_t aggbase = UINT32_MAX; 9285 dtrace_state_t *state = ecb->dte_state; 9286 9287 /* 9288 * If we record anything, we always record the epid. (And we always 9289 * record it first.) 9290 */ 9291 offs = sizeof (dtrace_epid_t); 9292 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9293 9294 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9295 dtrace_recdesc_t *rec = &act->dta_rec; 9296 9297 if ((align = rec->dtrd_alignment) > maxalign) 9298 maxalign = align; 9299 9300 if (!wastuple && act->dta_intuple) { 9301 /* 9302 * This is the first record in a tuple. Align the 9303 * offset to be at offset 4 in an 8-byte aligned 9304 * block. 
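 *
 * A worked example, assuming a 4-byte dtrace_aggid_t and an 8-byte
 * uint64_t: with offs == 8, diff becomes (8 + 4) & 7 == 4, offs is
 * advanced by 8 - 4 to 12, and aggbase becomes 8.  The first tuple
 * record thus lands 4 bytes into an 8-byte aligned block, with the
 * preceding 4 bytes (at aggbase) reserved for the aggregation ID.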
9305 */ 9306 diff = offs + sizeof (dtrace_aggid_t); 9307 9308 if (diff = (diff & (sizeof (uint64_t) - 1))) 9309 offs += sizeof (uint64_t) - diff; 9310 9311 aggbase = offs - sizeof (dtrace_aggid_t); 9312 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9313 } 9314 9315 /*LINTED*/ 9316 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9317 /* 9318 * The current offset is not properly aligned; align it. 9319 */ 9320 offs += align - diff; 9321 } 9322 9323 rec->dtrd_offset = offs; 9324 9325 if (offs + rec->dtrd_size > ecb->dte_needed) { 9326 ecb->dte_needed = offs + rec->dtrd_size; 9327 9328 if (ecb->dte_needed > state->dts_needed) 9329 state->dts_needed = ecb->dte_needed; 9330 } 9331 9332 if (DTRACEACT_ISAGG(act->dta_kind)) { 9333 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9334 dtrace_action_t *first = agg->dtag_first, *prev; 9335 9336 ASSERT(rec->dtrd_size != 0 && first != NULL); 9337 ASSERT(wastuple); 9338 ASSERT(aggbase != UINT32_MAX); 9339 9340 agg->dtag_base = aggbase; 9341 9342 while ((prev = first->dta_prev) != NULL && 9343 DTRACEACT_ISAGG(prev->dta_kind)) { 9344 agg = (dtrace_aggregation_t *)prev; 9345 first = agg->dtag_first; 9346 } 9347 9348 if (prev != NULL) { 9349 offs = prev->dta_rec.dtrd_offset + 9350 prev->dta_rec.dtrd_size; 9351 } else { 9352 offs = sizeof (dtrace_epid_t); 9353 } 9354 wastuple = 0; 9355 } else { 9356 if (!act->dta_intuple) 9357 ecb->dte_size = offs + rec->dtrd_size; 9358 9359 offs += rec->dtrd_size; 9360 } 9361 9362 wastuple = act->dta_intuple; 9363 } 9364 9365 if ((act = ecb->dte_action) != NULL && 9366 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9367 ecb->dte_size == sizeof (dtrace_epid_t)) { 9368 /* 9369 * If the size is still sizeof (dtrace_epid_t), then all 9370 * actions store no data; set the size to 0. 9371 */ 9372 ecb->dte_alignment = maxalign; 9373 ecb->dte_size = 0; 9374 9375 /* 9376 * If the needed space is still sizeof (dtrace_epid_t), then 9377 * all actions need no additional space; set the needed 9378 * size to 0. 9379 */ 9380 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9381 ecb->dte_needed = 0; 9382 9383 return; 9384 } 9385 9386 /* 9387 * Set our alignment, and make sure that the dte_size and dte_needed 9388 * are aligned to the size of an EPID. 
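 *
 * (For instance, assuming a 4-byte dtrace_epid_t, a dte_size of 13
 * would be rounded up to (13 + 3) & ~3 == 16.)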
9389 */ 9390 ecb->dte_alignment = maxalign; 9391 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9392 ~(sizeof (dtrace_epid_t) - 1); 9393 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9394 ~(sizeof (dtrace_epid_t) - 1); 9395 ASSERT(ecb->dte_size <= ecb->dte_needed); 9396 } 9397 9398 static dtrace_action_t * 9399 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9400 { 9401 dtrace_aggregation_t *agg; 9402 size_t size = sizeof (uint64_t); 9403 int ntuple = desc->dtad_ntuple; 9404 dtrace_action_t *act; 9405 dtrace_recdesc_t *frec; 9406 dtrace_aggid_t aggid; 9407 dtrace_state_t *state = ecb->dte_state; 9408 9409 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9410 agg->dtag_ecb = ecb; 9411 9412 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9413 9414 switch (desc->dtad_kind) { 9415 case DTRACEAGG_MIN: 9416 agg->dtag_initial = INT64_MAX; 9417 agg->dtag_aggregate = dtrace_aggregate_min; 9418 break; 9419 9420 case DTRACEAGG_MAX: 9421 agg->dtag_initial = INT64_MIN; 9422 agg->dtag_aggregate = dtrace_aggregate_max; 9423 break; 9424 9425 case DTRACEAGG_COUNT: 9426 agg->dtag_aggregate = dtrace_aggregate_count; 9427 break; 9428 9429 case DTRACEAGG_QUANTIZE: 9430 agg->dtag_aggregate = dtrace_aggregate_quantize; 9431 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9432 sizeof (uint64_t); 9433 break; 9434 9435 case DTRACEAGG_LQUANTIZE: { 9436 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9437 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9438 9439 agg->dtag_initial = desc->dtad_arg; 9440 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9441 9442 if (step == 0 || levels == 0) 9443 goto err; 9444 9445 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9446 break; 9447 } 9448 9449 case DTRACEAGG_LLQUANTIZE: { 9450 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 9451 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 9452 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 9453 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 9454 int64_t v; 9455 9456 agg->dtag_initial = desc->dtad_arg; 9457 agg->dtag_aggregate = dtrace_aggregate_llquantize; 9458 9459 if (factor < 2 || low >= high || nsteps < factor) 9460 goto err; 9461 9462 /* 9463 * Now check that the number of steps evenly divides a power 9464 * of the factor. (This assures both integer bucket size and 9465 * linearity within each magnitude.) 9466 */ 9467 for (v = factor; v < nsteps; v *= factor) 9468 continue; 9469 9470 if ((v % nsteps) || (nsteps % factor)) 9471 goto err; 9472 9473 size = (dtrace_aggregate_llquantize_bucket(factor, 9474 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 9475 break; 9476 } 9477 9478 case DTRACEAGG_AVG: 9479 agg->dtag_aggregate = dtrace_aggregate_avg; 9480 size = sizeof (uint64_t) * 2; 9481 break; 9482 9483 case DTRACEAGG_STDDEV: 9484 agg->dtag_aggregate = dtrace_aggregate_stddev; 9485 size = sizeof (uint64_t) * 4; 9486 break; 9487 9488 case DTRACEAGG_SUM: 9489 agg->dtag_aggregate = dtrace_aggregate_sum; 9490 break; 9491 9492 default: 9493 goto err; 9494 } 9495 9496 agg->dtag_action.dta_rec.dtrd_size = size; 9497 9498 if (ntuple == 0) 9499 goto err; 9500 9501 /* 9502 * We must make sure that we have enough actions for the n-tuple. 9503 */ 9504 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9505 if (DTRACEACT_ISAGG(act->dta_kind)) 9506 break; 9507 9508 if (--ntuple == 0) { 9509 /* 9510 * This is the action with which our n-tuple begins. 
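 *
 * (Roughly speaking: for an aggregation such as @a[x, y] = count(),
 * the actions that evaluate the keys x and y immediately precede the
 * aggregating action on the chain, and dtag_first is left pointing at
 * the action for the first key.)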
9511 */ 9512 agg->dtag_first = act; 9513 goto success; 9514 } 9515 } 9516 9517 /* 9518 * This n-tuple is short by ntuple elements. Return failure. 9519 */ 9520 ASSERT(ntuple != 0); 9521 err: 9522 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9523 return (NULL); 9524 9525 success: 9526 /* 9527 * If the last action in the tuple has a size of zero, it's actually 9528 * an expression argument for the aggregating action. 9529 */ 9530 ASSERT(ecb->dte_action_last != NULL); 9531 act = ecb->dte_action_last; 9532 9533 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9534 ASSERT(act->dta_difo != NULL); 9535 9536 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9537 agg->dtag_hasarg = 1; 9538 } 9539 9540 /* 9541 * We need to allocate an id for this aggregation. 9542 */ 9543 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9544 VM_BESTFIT | VM_SLEEP); 9545 9546 if (aggid - 1 >= state->dts_naggregations) { 9547 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9548 dtrace_aggregation_t **aggs; 9549 int naggs = state->dts_naggregations << 1; 9550 int onaggs = state->dts_naggregations; 9551 9552 ASSERT(aggid == state->dts_naggregations + 1); 9553 9554 if (naggs == 0) { 9555 ASSERT(oaggs == NULL); 9556 naggs = 1; 9557 } 9558 9559 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9560 9561 if (oaggs != NULL) { 9562 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9563 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9564 } 9565 9566 state->dts_aggregations = aggs; 9567 state->dts_naggregations = naggs; 9568 } 9569 9570 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9571 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9572 9573 frec = &agg->dtag_first->dta_rec; 9574 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9575 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9576 9577 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9578 ASSERT(!act->dta_intuple); 9579 act->dta_intuple = 1; 9580 } 9581 9582 return (&agg->dtag_action); 9583 } 9584 9585 static void 9586 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9587 { 9588 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9589 dtrace_state_t *state = ecb->dte_state; 9590 dtrace_aggid_t aggid = agg->dtag_id; 9591 9592 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9593 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9594 9595 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9596 state->dts_aggregations[aggid - 1] = NULL; 9597 9598 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9599 } 9600 9601 static int 9602 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9603 { 9604 dtrace_action_t *action, *last; 9605 dtrace_difo_t *dp = desc->dtad_difo; 9606 uint32_t size = 0, align = sizeof (uint8_t), mask; 9607 uint16_t format = 0; 9608 dtrace_recdesc_t *rec; 9609 dtrace_state_t *state = ecb->dte_state; 9610 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9611 uint64_t arg = desc->dtad_arg; 9612 9613 ASSERT(MUTEX_HELD(&dtrace_lock)); 9614 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9615 9616 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9617 /* 9618 * If this is an aggregating action, there must be neither 9619 * a speculate nor a commit on the action chain. 
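 *
 * (As an illustration: a clause along the lines of
 * "... { commit(spec); @c = count(); }" reaches this point with a
 * DTRACEACT_COMMIT already on the chain, and the aggregation is
 * rejected with EINVAL by the loop below.)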
9620 */ 9621 dtrace_action_t *act; 9622 9623 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9624 if (act->dta_kind == DTRACEACT_COMMIT) 9625 return (EINVAL); 9626 9627 if (act->dta_kind == DTRACEACT_SPECULATE) 9628 return (EINVAL); 9629 } 9630 9631 action = dtrace_ecb_aggregation_create(ecb, desc); 9632 9633 if (action == NULL) 9634 return (EINVAL); 9635 } else { 9636 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9637 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9638 dp != NULL && dp->dtdo_destructive)) { 9639 state->dts_destructive = 1; 9640 } 9641 9642 switch (desc->dtad_kind) { 9643 case DTRACEACT_PRINTF: 9644 case DTRACEACT_PRINTA: 9645 case DTRACEACT_SYSTEM: 9646 case DTRACEACT_FREOPEN: 9647 /* 9648 * We know that our arg is a string -- turn it into a 9649 * format. 9650 */ 9651 if (arg == NULL) { 9652 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 9653 format = 0; 9654 } else { 9655 ASSERT(arg != NULL); 9656 ASSERT(arg > KERNELBASE); 9657 format = dtrace_format_add(state, 9658 (char *)(uintptr_t)arg); 9659 } 9660 9661 /*FALLTHROUGH*/ 9662 case DTRACEACT_LIBACT: 9663 case DTRACEACT_DIFEXPR: 9664 if (dp == NULL) 9665 return (EINVAL); 9666 9667 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9668 break; 9669 9670 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 9671 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9672 return (EINVAL); 9673 9674 size = opt[DTRACEOPT_STRSIZE]; 9675 } 9676 9677 break; 9678 9679 case DTRACEACT_STACK: 9680 if ((nframes = arg) == 0) { 9681 nframes = opt[DTRACEOPT_STACKFRAMES]; 9682 ASSERT(nframes > 0); 9683 arg = nframes; 9684 } 9685 9686 size = nframes * sizeof (pc_t); 9687 break; 9688 9689 case DTRACEACT_JSTACK: 9690 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 9691 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 9692 9693 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 9694 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 9695 9696 arg = DTRACE_USTACK_ARG(nframes, strsize); 9697 9698 /*FALLTHROUGH*/ 9699 case DTRACEACT_USTACK: 9700 if (desc->dtad_kind != DTRACEACT_JSTACK && 9701 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 9702 strsize = DTRACE_USTACK_STRSIZE(arg); 9703 nframes = opt[DTRACEOPT_USTACKFRAMES]; 9704 ASSERT(nframes > 0); 9705 arg = DTRACE_USTACK_ARG(nframes, strsize); 9706 } 9707 9708 /* 9709 * Save a slot for the pid. 9710 */ 9711 size = (nframes + 1) * sizeof (uint64_t); 9712 size += DTRACE_USTACK_STRSIZE(arg); 9713 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 9714 9715 break; 9716 9717 case DTRACEACT_SYM: 9718 case DTRACEACT_MOD: 9719 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 9720 sizeof (uint64_t)) || 9721 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9722 return (EINVAL); 9723 break; 9724 9725 case DTRACEACT_USYM: 9726 case DTRACEACT_UMOD: 9727 case DTRACEACT_UADDR: 9728 if (dp == NULL || 9729 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 9730 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9731 return (EINVAL); 9732 9733 /* 9734 * We have a slot for the pid, plus a slot for the 9735 * argument. To keep things simple (aligned with 9736 * bitness-neutral sizing), we store each as a 64-bit 9737 * quantity. 
9738 */ 9739 size = 2 * sizeof (uint64_t); 9740 break; 9741 9742 case DTRACEACT_STOP: 9743 case DTRACEACT_BREAKPOINT: 9744 case DTRACEACT_PANIC: 9745 break; 9746 9747 case DTRACEACT_CHILL: 9748 case DTRACEACT_DISCARD: 9749 case DTRACEACT_RAISE: 9750 if (dp == NULL) 9751 return (EINVAL); 9752 break; 9753 9754 case DTRACEACT_EXIT: 9755 if (dp == NULL || 9756 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 9757 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9758 return (EINVAL); 9759 break; 9760 9761 case DTRACEACT_SPECULATE: 9762 if (ecb->dte_size > sizeof (dtrace_epid_t)) 9763 return (EINVAL); 9764 9765 if (dp == NULL) 9766 return (EINVAL); 9767 9768 state->dts_speculates = 1; 9769 break; 9770 9771 case DTRACEACT_COMMIT: { 9772 dtrace_action_t *act = ecb->dte_action; 9773 9774 for (; act != NULL; act = act->dta_next) { 9775 if (act->dta_kind == DTRACEACT_COMMIT) 9776 return (EINVAL); 9777 } 9778 9779 if (dp == NULL) 9780 return (EINVAL); 9781 break; 9782 } 9783 9784 default: 9785 return (EINVAL); 9786 } 9787 9788 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 9789 /* 9790 * If this is a data-storing action or a speculate, 9791 * we must be sure that there isn't a commit on the 9792 * action chain. 9793 */ 9794 dtrace_action_t *act = ecb->dte_action; 9795 9796 for (; act != NULL; act = act->dta_next) { 9797 if (act->dta_kind == DTRACEACT_COMMIT) 9798 return (EINVAL); 9799 } 9800 } 9801 9802 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 9803 action->dta_rec.dtrd_size = size; 9804 } 9805 9806 action->dta_refcnt = 1; 9807 rec = &action->dta_rec; 9808 size = rec->dtrd_size; 9809 9810 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 9811 if (!(size & mask)) { 9812 align = mask + 1; 9813 break; 9814 } 9815 } 9816 9817 action->dta_kind = desc->dtad_kind; 9818 9819 if ((action->dta_difo = dp) != NULL) 9820 dtrace_difo_hold(dp); 9821 9822 rec->dtrd_action = action->dta_kind; 9823 rec->dtrd_arg = arg; 9824 rec->dtrd_uarg = desc->dtad_uarg; 9825 rec->dtrd_alignment = (uint16_t)align; 9826 rec->dtrd_format = format; 9827 9828 if ((last = ecb->dte_action_last) != NULL) { 9829 ASSERT(ecb->dte_action != NULL); 9830 action->dta_prev = last; 9831 last->dta_next = action; 9832 } else { 9833 ASSERT(ecb->dte_action == NULL); 9834 ecb->dte_action = action; 9835 } 9836 9837 ecb->dte_action_last = action; 9838 9839 return (0); 9840 } 9841 9842 static void 9843 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 9844 { 9845 dtrace_action_t *act = ecb->dte_action, *next; 9846 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 9847 dtrace_difo_t *dp; 9848 uint16_t format; 9849 9850 if (act != NULL && act->dta_refcnt > 1) { 9851 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 9852 act->dta_refcnt--; 9853 } else { 9854 for (; act != NULL; act = next) { 9855 next = act->dta_next; 9856 ASSERT(next != NULL || act == ecb->dte_action_last); 9857 ASSERT(act->dta_refcnt == 1); 9858 9859 if ((format = act->dta_rec.dtrd_format) != 0) 9860 dtrace_format_remove(ecb->dte_state, format); 9861 9862 if ((dp = act->dta_difo) != NULL) 9863 dtrace_difo_release(dp, vstate); 9864 9865 if (DTRACEACT_ISAGG(act->dta_kind)) { 9866 dtrace_ecb_aggregation_destroy(ecb, act); 9867 } else { 9868 kmem_free(act, sizeof (dtrace_action_t)); 9869 } 9870 } 9871 } 9872 9873 ecb->dte_action = NULL; 9874 ecb->dte_action_last = NULL; 9875 ecb->dte_size = sizeof (dtrace_epid_t); 9876 } 9877 9878 static void 9879 dtrace_ecb_disable(dtrace_ecb_t *ecb) 9880 { 9881 /* 9882 * We disable the ECB by 
removing it from its probe. 9883 */ 9884 dtrace_ecb_t *pecb, *prev = NULL; 9885 dtrace_probe_t *probe = ecb->dte_probe; 9886 9887 ASSERT(MUTEX_HELD(&dtrace_lock)); 9888 9889 if (probe == NULL) { 9890 /* 9891 * This is the NULL probe; there is nothing to disable. 9892 */ 9893 return; 9894 } 9895 9896 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9897 if (pecb == ecb) 9898 break; 9899 prev = pecb; 9900 } 9901 9902 ASSERT(pecb != NULL); 9903 9904 if (prev == NULL) { 9905 probe->dtpr_ecb = ecb->dte_next; 9906 } else { 9907 prev->dte_next = ecb->dte_next; 9908 } 9909 9910 if (ecb == probe->dtpr_ecb_last) { 9911 ASSERT(ecb->dte_next == NULL); 9912 probe->dtpr_ecb_last = prev; 9913 } 9914 9915 /* 9916 * The ECB has been disconnected from the probe; now sync to assure 9917 * that all CPUs have seen the change before returning. 9918 */ 9919 dtrace_sync(); 9920 9921 if (probe->dtpr_ecb == NULL) { 9922 /* 9923 * That was the last ECB on the probe; clear the predicate 9924 * cache ID for the probe, disable it and sync one more time 9925 * to assure that we'll never hit it again. 9926 */ 9927 dtrace_provider_t *prov = probe->dtpr_provider; 9928 9929 ASSERT(ecb->dte_next == NULL); 9930 ASSERT(probe->dtpr_ecb_last == NULL); 9931 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9932 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9933 probe->dtpr_id, probe->dtpr_arg); 9934 dtrace_sync(); 9935 } else { 9936 /* 9937 * There is at least one ECB remaining on the probe. If there 9938 * is _exactly_ one, set the probe's predicate cache ID to be 9939 * the predicate cache ID of the remaining ECB. 9940 */ 9941 ASSERT(probe->dtpr_ecb_last != NULL); 9942 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9943 9944 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9945 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9946 9947 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9948 9949 if (p != NULL) 9950 probe->dtpr_predcache = p->dtp_cacheid; 9951 } 9952 9953 ecb->dte_next = NULL; 9954 } 9955 } 9956 9957 static void 9958 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9959 { 9960 dtrace_state_t *state = ecb->dte_state; 9961 dtrace_vstate_t *vstate = &state->dts_vstate; 9962 dtrace_predicate_t *pred; 9963 dtrace_epid_t epid = ecb->dte_epid; 9964 9965 ASSERT(MUTEX_HELD(&dtrace_lock)); 9966 ASSERT(ecb->dte_next == NULL); 9967 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 9968 9969 if ((pred = ecb->dte_predicate) != NULL) 9970 dtrace_predicate_release(pred, vstate); 9971 9972 dtrace_ecb_action_remove(ecb); 9973 9974 ASSERT(state->dts_ecbs[epid - 1] == ecb); 9975 state->dts_ecbs[epid - 1] = NULL; 9976 9977 kmem_free(ecb, sizeof (dtrace_ecb_t)); 9978 } 9979 9980 static dtrace_ecb_t * 9981 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 9982 dtrace_enabling_t *enab) 9983 { 9984 dtrace_ecb_t *ecb; 9985 dtrace_predicate_t *pred; 9986 dtrace_actdesc_t *act; 9987 dtrace_provider_t *prov; 9988 dtrace_ecbdesc_t *desc = enab->dten_current; 9989 9990 ASSERT(MUTEX_HELD(&dtrace_lock)); 9991 ASSERT(state != NULL); 9992 9993 ecb = dtrace_ecb_add(state, probe); 9994 ecb->dte_uarg = desc->dted_uarg; 9995 9996 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 9997 dtrace_predicate_hold(pred); 9998 ecb->dte_predicate = pred; 9999 } 10000 10001 if (probe != NULL) { 10002 /* 10003 * If the provider shows more leg than the consumer is old 10004 * enough to see, we need to enable the appropriate implicit 10005 * predicate bits to prevent the ecb from activating at 10006 * revealing times. 
10007 * 10008 * Providers specifying DTRACE_PRIV_USER at register time 10009 * are stating that they need the /proc-style privilege 10010 * model to be enforced, and this is what DTRACE_COND_OWNER 10011 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10012 */ 10013 prov = probe->dtpr_provider; 10014 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10015 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10016 ecb->dte_cond |= DTRACE_COND_OWNER; 10017 10018 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10019 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10020 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10021 10022 /* 10023 * If the provider shows us kernel innards and the user 10024 * is lacking sufficient privilege, enable the 10025 * DTRACE_COND_USERMODE implicit predicate. 10026 */ 10027 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10028 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10029 ecb->dte_cond |= DTRACE_COND_USERMODE; 10030 } 10031 10032 if (dtrace_ecb_create_cache != NULL) { 10033 /* 10034 * If we have a cached ecb, we'll use its action list instead 10035 * of creating our own (saving both time and space). 10036 */ 10037 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10038 dtrace_action_t *act = cached->dte_action; 10039 10040 if (act != NULL) { 10041 ASSERT(act->dta_refcnt > 0); 10042 act->dta_refcnt++; 10043 ecb->dte_action = act; 10044 ecb->dte_action_last = cached->dte_action_last; 10045 ecb->dte_needed = cached->dte_needed; 10046 ecb->dte_size = cached->dte_size; 10047 ecb->dte_alignment = cached->dte_alignment; 10048 } 10049 10050 return (ecb); 10051 } 10052 10053 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10054 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10055 dtrace_ecb_destroy(ecb); 10056 return (NULL); 10057 } 10058 } 10059 10060 dtrace_ecb_resize(ecb); 10061 10062 return (dtrace_ecb_create_cache = ecb); 10063 } 10064 10065 static int 10066 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10067 { 10068 dtrace_ecb_t *ecb; 10069 dtrace_enabling_t *enab = arg; 10070 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10071 10072 ASSERT(state != NULL); 10073 10074 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10075 /* 10076 * This probe was created in a generation for which this 10077 * enabling has previously created ECBs; we don't want to 10078 * enable it again, so just kick out. 
10079 */ 10080 return (DTRACE_MATCH_NEXT); 10081 } 10082 10083 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10084 return (DTRACE_MATCH_DONE); 10085 10086 if (dtrace_ecb_enable(ecb) < 0) 10087 return (DTRACE_MATCH_FAIL); 10088 10089 return (DTRACE_MATCH_NEXT); 10090 } 10091 10092 static dtrace_ecb_t * 10093 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10094 { 10095 dtrace_ecb_t *ecb; 10096 10097 ASSERT(MUTEX_HELD(&dtrace_lock)); 10098 10099 if (id == 0 || id > state->dts_necbs) 10100 return (NULL); 10101 10102 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10103 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10104 10105 return (state->dts_ecbs[id - 1]); 10106 } 10107 10108 static dtrace_aggregation_t * 10109 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10110 { 10111 dtrace_aggregation_t *agg; 10112 10113 ASSERT(MUTEX_HELD(&dtrace_lock)); 10114 10115 if (id == 0 || id > state->dts_naggregations) 10116 return (NULL); 10117 10118 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10119 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10120 agg->dtag_id == id); 10121 10122 return (state->dts_aggregations[id - 1]); 10123 } 10124 10125 /* 10126 * DTrace Buffer Functions 10127 * 10128 * The following functions manipulate DTrace buffers. Most of these functions 10129 * are called in the context of establishing or processing consumer state; 10130 * exceptions are explicitly noted. 10131 */ 10132 10133 /* 10134 * Note: called from cross call context. This function switches the two 10135 * buffers on a given CPU. The atomicity of this operation is assured by 10136 * disabling interrupts while the actual switch takes place; the disabling of 10137 * interrupts serializes the execution with any execution of dtrace_probe() on 10138 * the same CPU. 10139 */ 10140 static void 10141 dtrace_buffer_switch(dtrace_buffer_t *buf) 10142 { 10143 caddr_t tomax = buf->dtb_tomax; 10144 caddr_t xamot = buf->dtb_xamot; 10145 dtrace_icookie_t cookie; 10146 10147 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10148 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10149 10150 cookie = dtrace_interrupt_disable(); 10151 buf->dtb_tomax = xamot; 10152 buf->dtb_xamot = tomax; 10153 buf->dtb_xamot_drops = buf->dtb_drops; 10154 buf->dtb_xamot_offset = buf->dtb_offset; 10155 buf->dtb_xamot_errors = buf->dtb_errors; 10156 buf->dtb_xamot_flags = buf->dtb_flags; 10157 buf->dtb_offset = 0; 10158 buf->dtb_drops = 0; 10159 buf->dtb_errors = 0; 10160 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10161 dtrace_interrupt_enable(cookie); 10162 } 10163 10164 /* 10165 * Note: called from cross call context. This function activates a buffer 10166 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10167 * is guaranteed by the disabling of interrupts. 10168 */ 10169 static void 10170 dtrace_buffer_activate(dtrace_state_t *state) 10171 { 10172 dtrace_buffer_t *buf; 10173 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10174 10175 buf = &state->dts_buffer[CPU->cpu_id]; 10176 10177 if (buf->dtb_tomax != NULL) { 10178 /* 10179 * We might like to assert that the buffer is marked inactive, 10180 * but this isn't necessarily true: the buffer for the CPU 10181 * that processes the BEGIN probe has its buffer activated 10182 * manually. In this case, we take the (harmless) action 10183 * re-clearing the bit INACTIVE bit. 
10184 */ 10185 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10186 } 10187 10188 dtrace_interrupt_enable(cookie); 10189 } 10190 10191 static int 10192 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10193 processorid_t cpu) 10194 { 10195 cpu_t *cp; 10196 dtrace_buffer_t *buf; 10197 10198 ASSERT(MUTEX_HELD(&cpu_lock)); 10199 ASSERT(MUTEX_HELD(&dtrace_lock)); 10200 10201 if (size > dtrace_nonroot_maxsize && 10202 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10203 return (EFBIG); 10204 10205 cp = cpu_list; 10206 10207 do { 10208 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10209 continue; 10210 10211 buf = &bufs[cp->cpu_id]; 10212 10213 /* 10214 * If there is already a buffer allocated for this CPU, it 10215 * is only possible that this is a DR event. In this case, 10216 * the buffer size must match our specified size. 10217 */ 10218 if (buf->dtb_tomax != NULL) { 10219 ASSERT(buf->dtb_size == size); 10220 continue; 10221 } 10222 10223 ASSERT(buf->dtb_xamot == NULL); 10224 10225 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10226 goto err; 10227 10228 buf->dtb_size = size; 10229 buf->dtb_flags = flags; 10230 buf->dtb_offset = 0; 10231 buf->dtb_drops = 0; 10232 10233 if (flags & DTRACEBUF_NOSWITCH) 10234 continue; 10235 10236 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10237 goto err; 10238 } while ((cp = cp->cpu_next) != cpu_list); 10239 10240 return (0); 10241 10242 err: 10243 cp = cpu_list; 10244 10245 do { 10246 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10247 continue; 10248 10249 buf = &bufs[cp->cpu_id]; 10250 10251 if (buf->dtb_xamot != NULL) { 10252 ASSERT(buf->dtb_tomax != NULL); 10253 ASSERT(buf->dtb_size == size); 10254 kmem_free(buf->dtb_xamot, size); 10255 } 10256 10257 if (buf->dtb_tomax != NULL) { 10258 ASSERT(buf->dtb_size == size); 10259 kmem_free(buf->dtb_tomax, size); 10260 } 10261 10262 buf->dtb_tomax = NULL; 10263 buf->dtb_xamot = NULL; 10264 buf->dtb_size = 0; 10265 } while ((cp = cp->cpu_next) != cpu_list); 10266 10267 return (ENOMEM); 10268 } 10269 10270 /* 10271 * Note: called from probe context. This function just increments the drop 10272 * count on a buffer. It has been made a function to allow for the 10273 * possibility of understanding the source of mysterious drop counts. (A 10274 * problem for which one may be particularly disappointed that DTrace cannot 10275 * be used to understand DTrace.) 10276 */ 10277 static void 10278 dtrace_buffer_drop(dtrace_buffer_t *buf) 10279 { 10280 buf->dtb_drops++; 10281 } 10282 10283 /* 10284 * Note: called from probe context. This function is called to reserve space 10285 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10286 * mstate. Returns the new offset in the buffer, or a negative value if an 10287 * error has occurred. 10288 */ 10289 static intptr_t 10290 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10291 dtrace_state_t *state, dtrace_mstate_t *mstate) 10292 { 10293 intptr_t offs = buf->dtb_offset, soffs; 10294 intptr_t woffs; 10295 caddr_t tomax; 10296 size_t total; 10297 10298 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10299 return (-1); 10300 10301 if ((tomax = buf->dtb_tomax) == NULL) { 10302 dtrace_buffer_drop(buf); 10303 return (-1); 10304 } 10305 10306 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10307 while (offs & (align - 1)) { 10308 /* 10309 * Assert that our alignment is off by a number which 10310 * is itself sizeof (uint32_t) aligned. 
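 *
 * A worked example: with align == 8 and offs == 12, one pass of this
 * loop stores a DTRACE_EPIDNONE word at offset 12 and advances offs to
 * 16; a consumer walking the buffer recognizes such words as padding
 * and skips over them.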
10311 */ 10312 ASSERT(!((align - (offs & (align - 1))) & 10313 (sizeof (uint32_t) - 1))); 10314 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10315 offs += sizeof (uint32_t); 10316 } 10317 10318 if ((soffs = offs + needed) > buf->dtb_size) { 10319 dtrace_buffer_drop(buf); 10320 return (-1); 10321 } 10322 10323 if (mstate == NULL) 10324 return (offs); 10325 10326 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10327 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10328 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10329 10330 return (offs); 10331 } 10332 10333 if (buf->dtb_flags & DTRACEBUF_FILL) { 10334 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10335 (buf->dtb_flags & DTRACEBUF_FULL)) 10336 return (-1); 10337 goto out; 10338 } 10339 10340 total = needed + (offs & (align - 1)); 10341 10342 /* 10343 * For a ring buffer, life is quite a bit more complicated. Before 10344 * we can store any padding, we need to adjust our wrapping offset. 10345 * (If we've never before wrapped or we're not about to, no adjustment 10346 * is required.) 10347 */ 10348 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10349 offs + total > buf->dtb_size) { 10350 woffs = buf->dtb_xamot_offset; 10351 10352 if (offs + total > buf->dtb_size) { 10353 /* 10354 * We can't fit in the end of the buffer. First, a 10355 * sanity check that we can fit in the buffer at all. 10356 */ 10357 if (total > buf->dtb_size) { 10358 dtrace_buffer_drop(buf); 10359 return (-1); 10360 } 10361 10362 /* 10363 * We're going to be storing at the top of the buffer, 10364 * so now we need to deal with the wrapped offset. We 10365 * only reset our wrapped offset to 0 if it is 10366 * currently greater than the current offset. If it 10367 * is less than the current offset, it is because a 10368 * previous allocation induced a wrap -- but the 10369 * allocation didn't subsequently take the space due 10370 * to an error or false predicate evaluation. In this 10371 * case, we'll just leave the wrapped offset alone: if 10372 * the wrapped offset hasn't been advanced far enough 10373 * for this allocation, it will be adjusted in the 10374 * lower loop. 10375 */ 10376 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10377 if (woffs >= offs) 10378 woffs = 0; 10379 } else { 10380 woffs = 0; 10381 } 10382 10383 /* 10384 * Now we know that we're going to be storing to the 10385 * top of the buffer and that there is room for us 10386 * there. We need to clear the buffer from the current 10387 * offset to the end (there may be old gunk there). 10388 */ 10389 while (offs < buf->dtb_size) 10390 tomax[offs++] = 0; 10391 10392 /* 10393 * We need to set our offset to zero. And because we 10394 * are wrapping, we need to set the bit indicating as 10395 * much. We can also adjust our needed space back 10396 * down to the space required by the ECB -- we know 10397 * that the top of the buffer is aligned. 10398 */ 10399 offs = 0; 10400 total = needed; 10401 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10402 } else { 10403 /* 10404 * There is room for us in the buffer, so we simply 10405 * need to check the wrapped offset. 10406 */ 10407 if (woffs < offs) { 10408 /* 10409 * The wrapped offset is less than the offset. 10410 * This can happen if we allocated buffer space 10411 * that induced a wrap, but then we didn't 10412 * subsequently take the space due to an error 10413 * or false predicate evaluation. This is 10414 * okay; we know that _this_ allocation isn't 10415 * going to induce a wrap. 
We still can't 10416 * reset the wrapped offset to be zero, 10417 * however: the space may have been trashed in 10418 * the previous failed probe attempt. But at 10419 * least the wrapped offset doesn't need to 10420 * be adjusted at all... 10421 */ 10422 goto out; 10423 } 10424 } 10425 10426 while (offs + total > woffs) { 10427 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10428 size_t size; 10429 10430 if (epid == DTRACE_EPIDNONE) { 10431 size = sizeof (uint32_t); 10432 } else { 10433 ASSERT(epid <= state->dts_necbs); 10434 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10435 10436 size = state->dts_ecbs[epid - 1]->dte_size; 10437 } 10438 10439 ASSERT(woffs + size <= buf->dtb_size); 10440 ASSERT(size != 0); 10441 10442 if (woffs + size == buf->dtb_size) { 10443 /* 10444 * We've reached the end of the buffer; we want 10445 * to set the wrapped offset to 0 and break 10446 * out. However, if the offs is 0, then we're 10447 * in a strange edge-condition: the amount of 10448 * space that we want to reserve plus the size 10449 * of the record that we're overwriting is 10450 * greater than the size of the buffer. This 10451 * is problematic because if we reserve the 10452 * space but subsequently don't consume it (due 10453 * to a failed predicate or error) the wrapped 10454 * offset will be 0 -- yet the EPID at offset 0 10455 * will not be committed. This situation is 10456 * relatively easy to deal with: if we're in 10457 * this case, the buffer is indistinguishable 10458 * from one that hasn't wrapped; we need only 10459 * finish the job by clearing the wrapped bit, 10460 * explicitly setting the offset to be 0, and 10461 * zero'ing out the old data in the buffer. 10462 */ 10463 if (offs == 0) { 10464 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10465 buf->dtb_offset = 0; 10466 woffs = total; 10467 10468 while (woffs < buf->dtb_size) 10469 tomax[woffs++] = 0; 10470 } 10471 10472 woffs = 0; 10473 break; 10474 } 10475 10476 woffs += size; 10477 } 10478 10479 /* 10480 * We have a wrapped offset. It may be that the wrapped offset 10481 * has become zero -- that's okay. 10482 */ 10483 buf->dtb_xamot_offset = woffs; 10484 } 10485 10486 out: 10487 /* 10488 * Now we can plow the buffer with any necessary padding. 10489 */ 10490 while (offs & (align - 1)) { 10491 /* 10492 * Assert that our alignment is off by a number which 10493 * is itself sizeof (uint32_t) aligned. 10494 */ 10495 ASSERT(!((align - (offs & (align - 1))) & 10496 (sizeof (uint32_t) - 1))); 10497 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10498 offs += sizeof (uint32_t); 10499 } 10500 10501 if (buf->dtb_flags & DTRACEBUF_FILL) { 10502 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10503 buf->dtb_flags |= DTRACEBUF_FULL; 10504 return (-1); 10505 } 10506 } 10507 10508 if (mstate == NULL) 10509 return (offs); 10510 10511 /* 10512 * For ring buffers and fill buffers, the scratch space is always 10513 * the inactive buffer. 10514 */ 10515 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10516 mstate->dtms_scratch_size = buf->dtb_size; 10517 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10518 10519 return (offs); 10520 } 10521 10522 static void 10523 dtrace_buffer_polish(dtrace_buffer_t *buf) 10524 { 10525 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10526 ASSERT(MUTEX_HELD(&dtrace_lock)); 10527 10528 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10529 return; 10530 10531 /* 10532 * We need to polish the ring buffer. 
There are three cases: 10533 * 10534 * - The first (and presumably most common) is that there is no gap 10535 * between the buffer offset and the wrapped offset. In this case, 10536 * there is nothing in the buffer that isn't valid data; we can 10537 * mark the buffer as polished and return. 10538 * 10539 * - The second (less common than the first but still more common 10540 * than the third) is that there is a gap between the buffer offset 10541 * and the wrapped offset, and the wrapped offset is larger than the 10542 * buffer offset. This can happen because of an alignment issue, or 10543 * can happen because of a call to dtrace_buffer_reserve() that 10544 * didn't subsequently consume the buffer space. In this case, 10545 * we need to zero the data from the buffer offset to the wrapped 10546 * offset. 10547 * 10548 * - The third (and least common) is that there is a gap between the 10549 * buffer offset and the wrapped offset, but the wrapped offset is 10550 * _less_ than the buffer offset. This can only happen because a 10551 * call to dtrace_buffer_reserve() induced a wrap, but the space 10552 * was not subsequently consumed. In this case, we need to zero the 10553 * space from the offset to the end of the buffer _and_ from the 10554 * top of the buffer to the wrapped offset. 10555 */ 10556 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10557 bzero(buf->dtb_tomax + buf->dtb_offset, 10558 buf->dtb_xamot_offset - buf->dtb_offset); 10559 } 10560 10561 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10562 bzero(buf->dtb_tomax + buf->dtb_offset, 10563 buf->dtb_size - buf->dtb_offset); 10564 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10565 } 10566 } 10567 10568 static void 10569 dtrace_buffer_free(dtrace_buffer_t *bufs) 10570 { 10571 int i; 10572 10573 for (i = 0; i < NCPU; i++) { 10574 dtrace_buffer_t *buf = &bufs[i]; 10575 10576 if (buf->dtb_tomax == NULL) { 10577 ASSERT(buf->dtb_xamot == NULL); 10578 ASSERT(buf->dtb_size == 0); 10579 continue; 10580 } 10581 10582 if (buf->dtb_xamot != NULL) { 10583 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10584 kmem_free(buf->dtb_xamot, buf->dtb_size); 10585 } 10586 10587 kmem_free(buf->dtb_tomax, buf->dtb_size); 10588 buf->dtb_size = 0; 10589 buf->dtb_tomax = NULL; 10590 buf->dtb_xamot = NULL; 10591 } 10592 } 10593 10594 /* 10595 * DTrace Enabling Functions 10596 */ 10597 static dtrace_enabling_t * 10598 dtrace_enabling_create(dtrace_vstate_t *vstate) 10599 { 10600 dtrace_enabling_t *enab; 10601 10602 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10603 enab->dten_vstate = vstate; 10604 10605 return (enab); 10606 } 10607 10608 static void 10609 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10610 { 10611 dtrace_ecbdesc_t **ndesc; 10612 size_t osize, nsize; 10613 10614 /* 10615 * We can't add to enablings after we've enabled them, or after we've 10616 * retained them. 
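* (When the descriptor array is full, it is grown by doubling: dten_maxdesc is doubled -- starting from one -- a new array is allocated, the existing descriptors are copied over, and the old array is freed.)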
10617 */ 10618 ASSERT(enab->dten_probegen == 0); 10619 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10620 10621 if (enab->dten_ndesc < enab->dten_maxdesc) { 10622 enab->dten_desc[enab->dten_ndesc++] = ecb; 10623 return; 10624 } 10625 10626 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10627 10628 if (enab->dten_maxdesc == 0) { 10629 enab->dten_maxdesc = 1; 10630 } else { 10631 enab->dten_maxdesc <<= 1; 10632 } 10633 10634 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 10635 10636 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10637 ndesc = kmem_zalloc(nsize, KM_SLEEP); 10638 bcopy(enab->dten_desc, ndesc, osize); 10639 kmem_free(enab->dten_desc, osize); 10640 10641 enab->dten_desc = ndesc; 10642 enab->dten_desc[enab->dten_ndesc++] = ecb; 10643 } 10644 10645 static void 10646 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 10647 dtrace_probedesc_t *pd) 10648 { 10649 dtrace_ecbdesc_t *new; 10650 dtrace_predicate_t *pred; 10651 dtrace_actdesc_t *act; 10652 10653 /* 10654 * We're going to create a new ECB description that matches the 10655 * specified ECB in every way, but has the specified probe description. 10656 */ 10657 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10658 10659 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 10660 dtrace_predicate_hold(pred); 10661 10662 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 10663 dtrace_actdesc_hold(act); 10664 10665 new->dted_action = ecb->dted_action; 10666 new->dted_pred = ecb->dted_pred; 10667 new->dted_probe = *pd; 10668 new->dted_uarg = ecb->dted_uarg; 10669 10670 dtrace_enabling_add(enab, new); 10671 } 10672 10673 static void 10674 dtrace_enabling_dump(dtrace_enabling_t *enab) 10675 { 10676 int i; 10677 10678 for (i = 0; i < enab->dten_ndesc; i++) { 10679 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 10680 10681 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 10682 desc->dtpd_provider, desc->dtpd_mod, 10683 desc->dtpd_func, desc->dtpd_name); 10684 } 10685 } 10686 10687 static void 10688 dtrace_enabling_destroy(dtrace_enabling_t *enab) 10689 { 10690 int i; 10691 dtrace_ecbdesc_t *ep; 10692 dtrace_vstate_t *vstate = enab->dten_vstate; 10693 10694 ASSERT(MUTEX_HELD(&dtrace_lock)); 10695 10696 for (i = 0; i < enab->dten_ndesc; i++) { 10697 dtrace_actdesc_t *act, *next; 10698 dtrace_predicate_t *pred; 10699 10700 ep = enab->dten_desc[i]; 10701 10702 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 10703 dtrace_predicate_release(pred, vstate); 10704 10705 for (act = ep->dted_action; act != NULL; act = next) { 10706 next = act->dtad_next; 10707 dtrace_actdesc_release(act, vstate); 10708 } 10709 10710 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10711 } 10712 10713 kmem_free(enab->dten_desc, 10714 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 10715 10716 /* 10717 * If this was a retained enabling, decrement the dts_nretained count 10718 * and take it off of the dtrace_retained list. 
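* (Membership on the retained list is inferred from a non-NULL dten_prev or dten_next, or from the enabling being the head of the list itself.)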
10719 */ 10720 if (enab->dten_prev != NULL || enab->dten_next != NULL || 10721 dtrace_retained == enab) { 10722 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10723 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 10724 enab->dten_vstate->dtvs_state->dts_nretained--; 10725 dtrace_retained_gen++; 10726 } 10727 10728 if (enab->dten_prev == NULL) { 10729 if (dtrace_retained == enab) { 10730 dtrace_retained = enab->dten_next; 10731 10732 if (dtrace_retained != NULL) 10733 dtrace_retained->dten_prev = NULL; 10734 } 10735 } else { 10736 ASSERT(enab != dtrace_retained); 10737 ASSERT(dtrace_retained != NULL); 10738 enab->dten_prev->dten_next = enab->dten_next; 10739 } 10740 10741 if (enab->dten_next != NULL) { 10742 ASSERT(dtrace_retained != NULL); 10743 enab->dten_next->dten_prev = enab->dten_prev; 10744 } 10745 10746 kmem_free(enab, sizeof (dtrace_enabling_t)); 10747 } 10748 10749 static int 10750 dtrace_enabling_retain(dtrace_enabling_t *enab) 10751 { 10752 dtrace_state_t *state; 10753 10754 ASSERT(MUTEX_HELD(&dtrace_lock)); 10755 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10756 ASSERT(enab->dten_vstate != NULL); 10757 10758 state = enab->dten_vstate->dtvs_state; 10759 ASSERT(state != NULL); 10760 10761 /* 10762 * We only allow each state to retain dtrace_retain_max enablings. 10763 */ 10764 if (state->dts_nretained >= dtrace_retain_max) 10765 return (ENOSPC); 10766 10767 state->dts_nretained++; 10768 dtrace_retained_gen++; 10769 10770 if (dtrace_retained == NULL) { 10771 dtrace_retained = enab; 10772 return (0); 10773 } 10774 10775 enab->dten_next = dtrace_retained; 10776 dtrace_retained->dten_prev = enab; 10777 dtrace_retained = enab; 10778 10779 return (0); 10780 } 10781 10782 static int 10783 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 10784 dtrace_probedesc_t *create) 10785 { 10786 dtrace_enabling_t *new, *enab; 10787 int found = 0, err = ENOENT; 10788 10789 ASSERT(MUTEX_HELD(&dtrace_lock)); 10790 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 10791 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 10792 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 10793 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 10794 10795 new = dtrace_enabling_create(&state->dts_vstate); 10796 10797 /* 10798 * Iterate over all retained enablings, looking for enablings that 10799 * match the specified state. 10800 */ 10801 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10802 int i; 10803 10804 /* 10805 * dtvs_state can only be NULL for helper enablings -- and 10806 * helper enablings can't be retained. 10807 */ 10808 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10809 10810 if (enab->dten_vstate->dtvs_state != state) 10811 continue; 10812 10813 /* 10814 * Now iterate over each probe description; we're looking for 10815 * an exact match to the specified probe description. 10816 */ 10817 for (i = 0; i < enab->dten_ndesc; i++) { 10818 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10819 dtrace_probedesc_t *pd = &ep->dted_probe; 10820 10821 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 10822 continue; 10823 10824 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 10825 continue; 10826 10827 if (strcmp(pd->dtpd_func, match->dtpd_func)) 10828 continue; 10829 10830 if (strcmp(pd->dtpd_name, match->dtpd_name)) 10831 continue; 10832 10833 /* 10834 * We have a winning probe! Add it to our growing 10835 * enabling. 
10836 */ 10837 found = 1; 10838 dtrace_enabling_addlike(new, ep, create); 10839 } 10840 } 10841 10842 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 10843 dtrace_enabling_destroy(new); 10844 return (err); 10845 } 10846 10847 return (0); 10848 } 10849 10850 static void 10851 dtrace_enabling_retract(dtrace_state_t *state) 10852 { 10853 dtrace_enabling_t *enab, *next; 10854 10855 ASSERT(MUTEX_HELD(&dtrace_lock)); 10856 10857 /* 10858 * Iterate over all retained enablings, destroy the enablings retained 10859 * for the specified state. 10860 */ 10861 for (enab = dtrace_retained; enab != NULL; enab = next) { 10862 next = enab->dten_next; 10863 10864 /* 10865 * dtvs_state can only be NULL for helper enablings -- and 10866 * helper enablings can't be retained. 10867 */ 10868 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10869 10870 if (enab->dten_vstate->dtvs_state == state) { 10871 ASSERT(state->dts_nretained > 0); 10872 dtrace_enabling_destroy(enab); 10873 } 10874 } 10875 10876 ASSERT(state->dts_nretained == 0); 10877 } 10878 10879 static int 10880 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 10881 { 10882 int i = 0; 10883 int total_matched = 0, matched = 0; 10884 10885 ASSERT(MUTEX_HELD(&cpu_lock)); 10886 ASSERT(MUTEX_HELD(&dtrace_lock)); 10887 10888 for (i = 0; i < enab->dten_ndesc; i++) { 10889 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10890 10891 enab->dten_current = ep; 10892 enab->dten_error = 0; 10893 10894 /* 10895 * If a provider failed to enable a probe then get out and 10896 * let the consumer know we failed. 10897 */ 10898 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0) 10899 return (EBUSY); 10900 10901 total_matched += matched; 10902 10903 if (enab->dten_error != 0) { 10904 /* 10905 * If we get an error half-way through enabling the 10906 * probes, we kick out -- perhaps with some number of 10907 * them enabled. Leaving enabled probes enabled may 10908 * be slightly confusing for user-level, but we expect 10909 * that no one will attempt to actually drive on in 10910 * the face of such errors. If this is an anonymous 10911 * enabling (indicated with a NULL nmatched pointer), 10912 * we cmn_err() a message. We aren't expecting to 10913 * get such an error -- such as it can exist at all, 10914 * it would be a result of corrupted DOF in the driver 10915 * properties. 10916 */ 10917 if (nmatched == NULL) { 10918 cmn_err(CE_WARN, "dtrace_enabling_match() " 10919 "error on %p: %d", (void *)ep, 10920 enab->dten_error); 10921 } 10922 10923 return (enab->dten_error); 10924 } 10925 } 10926 10927 enab->dten_probegen = dtrace_probegen; 10928 if (nmatched != NULL) 10929 *nmatched = total_matched; 10930 10931 return (0); 10932 } 10933 10934 static void 10935 dtrace_enabling_matchall(void) 10936 { 10937 dtrace_enabling_t *enab; 10938 10939 mutex_enter(&cpu_lock); 10940 mutex_enter(&dtrace_lock); 10941 10942 /* 10943 * Iterate over all retained enablings to see if any probes match 10944 * against them. We only perform this operation on enablings for which 10945 * we have sufficient permissions by virtue of being in the global zone 10946 * or in the same zone as the DTrace client. Because we can be called 10947 * after dtrace_detach() has been called, we cannot assert that there 10948 * are retained enablings. We can safely load from dtrace_retained, 10949 * however: the taskq_destroy() at the end of dtrace_detach() will 10950 * block pending our completion. 
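* Note that an enabling whose state has no credential at all can be matched here only by virtue of DTRACE_CRV_ALLZONE visibility.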
10951 */ 10952 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10953 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred; 10954 cred_t *cr = dcr->dcr_cred; 10955 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0; 10956 10957 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL && 10958 (zone == GLOBAL_ZONEID || getzoneid() == zone))) 10959 (void) dtrace_enabling_match(enab, NULL); 10960 } 10961 10962 mutex_exit(&dtrace_lock); 10963 mutex_exit(&cpu_lock); 10964 } 10965 10966 /* 10967 * If an enabling is to be enabled without having matched probes (that is, if 10968 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 10969 * enabling must be _primed_ by creating an ECB for every ECB description. 10970 * This must be done to assure that we know the number of speculations, the 10971 * number of aggregations, the minimum buffer size needed, etc. before we 10972 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 10973 * enabling any probes, we create ECBs for every ECB description, but with a 10974 * NULL probe -- which is exactly what this function does. 10975 */ 10976 static void 10977 dtrace_enabling_prime(dtrace_state_t *state) 10978 { 10979 dtrace_enabling_t *enab; 10980 int i; 10981 10982 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10983 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10984 10985 if (enab->dten_vstate->dtvs_state != state) 10986 continue; 10987 10988 /* 10989 * We don't want to prime an enabling more than once, lest 10990 * we allow a malicious user to induce resource exhaustion. 10991 * (The ECBs that result from priming an enabling aren't 10992 * leaked -- but they also aren't deallocated until the 10993 * consumer state is destroyed.) 10994 */ 10995 if (enab->dten_primed) 10996 continue; 10997 10998 for (i = 0; i < enab->dten_ndesc; i++) { 10999 enab->dten_current = enab->dten_desc[i]; 11000 (void) dtrace_probe_enable(NULL, enab); 11001 } 11002 11003 enab->dten_primed = 1; 11004 } 11005 } 11006 11007 /* 11008 * Called to indicate that probes should be provided due to retained 11009 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11010 * must take an initial lap through the enabling calling the dtps_provide() 11011 * entry point explicitly to allow for autocreated probes. 11012 */ 11013 static void 11014 dtrace_enabling_provide(dtrace_provider_t *prv) 11015 { 11016 int i, all = 0; 11017 dtrace_probedesc_t desc; 11018 dtrace_genid_t gen; 11019 11020 ASSERT(MUTEX_HELD(&dtrace_lock)); 11021 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11022 11023 if (prv == NULL) { 11024 all = 1; 11025 prv = dtrace_provider; 11026 } 11027 11028 do { 11029 dtrace_enabling_t *enab; 11030 void *parg = prv->dtpv_arg; 11031 11032 retry: 11033 gen = dtrace_retained_gen; 11034 for (enab = dtrace_retained; enab != NULL; 11035 enab = enab->dten_next) { 11036 for (i = 0; i < enab->dten_ndesc; i++) { 11037 desc = enab->dten_desc[i]->dted_probe; 11038 mutex_exit(&dtrace_lock); 11039 prv->dtpv_pops.dtps_provide(parg, &desc); 11040 mutex_enter(&dtrace_lock); 11041 /* 11042 * Process the retained enablings again if 11043 * they have changed while we weren't holding 11044 * dtrace_lock. 11045 */ 11046 if (gen != dtrace_retained_gen) 11047 goto retry; 11048 } 11049 } 11050 } while (all && (prv = prv->dtpv_next) != NULL); 11051 11052 mutex_exit(&dtrace_lock); 11053 dtrace_probe_provide(NULL, all ?
NULL : prv); 11054 mutex_enter(&dtrace_lock); 11055 } 11056 11057 /* 11058 * DTrace DOF Functions 11059 */ 11060 /*ARGSUSED*/ 11061 static void 11062 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11063 { 11064 if (dtrace_err_verbose) 11065 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11066 11067 #ifdef DTRACE_ERRDEBUG 11068 dtrace_errdebug(str); 11069 #endif 11070 } 11071 11072 /* 11073 * Create DOF out of a currently enabled state. Right now, we only create 11074 * DOF containing the run-time options -- but this could be expanded to create 11075 * complete DOF representing the enabled state. 11076 */ 11077 static dof_hdr_t * 11078 dtrace_dof_create(dtrace_state_t *state) 11079 { 11080 dof_hdr_t *dof; 11081 dof_sec_t *sec; 11082 dof_optdesc_t *opt; 11083 int i, len = sizeof (dof_hdr_t) + 11084 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11085 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11086 11087 ASSERT(MUTEX_HELD(&dtrace_lock)); 11088 11089 dof = kmem_zalloc(len, KM_SLEEP); 11090 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11091 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11092 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11093 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11094 11095 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11096 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11097 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11098 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11099 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11100 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11101 11102 dof->dofh_flags = 0; 11103 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11104 dof->dofh_secsize = sizeof (dof_sec_t); 11105 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11106 dof->dofh_secoff = sizeof (dof_hdr_t); 11107 dof->dofh_loadsz = len; 11108 dof->dofh_filesz = len; 11109 dof->dofh_pad = 0; 11110 11111 /* 11112 * Fill in the option section header... 11113 */ 11114 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11115 sec->dofs_type = DOF_SECT_OPTDESC; 11116 sec->dofs_align = sizeof (uint64_t); 11117 sec->dofs_flags = DOF_SECF_LOAD; 11118 sec->dofs_entsize = sizeof (dof_optdesc_t); 11119 11120 opt = (dof_optdesc_t *)((uintptr_t)sec + 11121 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11122 11123 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11124 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11125 11126 for (i = 0; i < DTRACEOPT_MAX; i++) { 11127 opt[i].dofo_option = i; 11128 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11129 opt[i].dofo_value = state->dts_options[i]; 11130 } 11131 11132 return (dof); 11133 } 11134 11135 static dof_hdr_t * 11136 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11137 { 11138 dof_hdr_t hdr, *dof; 11139 11140 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11141 11142 /* 11143 * First, we're going to copyin() the sizeof (dof_hdr_t). 11144 */ 11145 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11146 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11147 *errp = EFAULT; 11148 return (NULL); 11149 } 11150 11151 /* 11152 * Now we'll allocate the entire DOF and copy it in -- provided 11153 * that the length isn't outrageous. 
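* Specifically, the load size claimed by the header must be at least sizeof (dof_hdr_t) and strictly less than dtrace_dof_maxsize. After the full copyin(), the load size is checked again against the value used to size the allocation; if it has changed underneath us, the DOF is rejected.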
11154 */ 11155 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11156 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11157 *errp = E2BIG; 11158 return (NULL); 11159 } 11160 11161 if (hdr.dofh_loadsz < sizeof (hdr)) { 11162 dtrace_dof_error(&hdr, "invalid load size"); 11163 *errp = EINVAL; 11164 return (NULL); 11165 } 11166 11167 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11168 11169 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 11170 dof->dofh_loadsz != hdr.dofh_loadsz) { 11171 kmem_free(dof, hdr.dofh_loadsz); 11172 *errp = EFAULT; 11173 return (NULL); 11174 } 11175 11176 return (dof); 11177 } 11178 11179 static dof_hdr_t * 11180 dtrace_dof_property(const char *name) 11181 { 11182 uchar_t *buf; 11183 uint64_t loadsz; 11184 unsigned int len, i; 11185 dof_hdr_t *dof; 11186 11187 /* 11188 * Unfortunately, array of values in .conf files are always (and 11189 * only) interpreted to be integer arrays. We must read our DOF 11190 * as an integer array, and then squeeze it into a byte array. 11191 */ 11192 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11193 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11194 return (NULL); 11195 11196 for (i = 0; i < len; i++) 11197 buf[i] = (uchar_t)(((int *)buf)[i]); 11198 11199 if (len < sizeof (dof_hdr_t)) { 11200 ddi_prop_free(buf); 11201 dtrace_dof_error(NULL, "truncated header"); 11202 return (NULL); 11203 } 11204 11205 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11206 ddi_prop_free(buf); 11207 dtrace_dof_error(NULL, "truncated DOF"); 11208 return (NULL); 11209 } 11210 11211 if (loadsz >= dtrace_dof_maxsize) { 11212 ddi_prop_free(buf); 11213 dtrace_dof_error(NULL, "oversized DOF"); 11214 return (NULL); 11215 } 11216 11217 dof = kmem_alloc(loadsz, KM_SLEEP); 11218 bcopy(buf, dof, loadsz); 11219 ddi_prop_free(buf); 11220 11221 return (dof); 11222 } 11223 11224 static void 11225 dtrace_dof_destroy(dof_hdr_t *dof) 11226 { 11227 kmem_free(dof, dof->dofh_loadsz); 11228 } 11229 11230 /* 11231 * Return the dof_sec_t pointer corresponding to a given section index. If the 11232 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11233 * a type other than DOF_SECT_NONE is specified, the header is checked against 11234 * this type and NULL is returned if the types do not match. 
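* The referenced section must also be flagged loadable (DOF_SECF_LOAD); a reference to an unloadable section is likewise an error.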
11235 */ 11236 static dof_sec_t * 11237 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11238 { 11239 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11240 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11241 11242 if (i >= dof->dofh_secnum) { 11243 dtrace_dof_error(dof, "referenced section index is invalid"); 11244 return (NULL); 11245 } 11246 11247 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11248 dtrace_dof_error(dof, "referenced section is not loadable"); 11249 return (NULL); 11250 } 11251 11252 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11253 dtrace_dof_error(dof, "referenced section is the wrong type"); 11254 return (NULL); 11255 } 11256 11257 return (sec); 11258 } 11259 11260 static dtrace_probedesc_t * 11261 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11262 { 11263 dof_probedesc_t *probe; 11264 dof_sec_t *strtab; 11265 uintptr_t daddr = (uintptr_t)dof; 11266 uintptr_t str; 11267 size_t size; 11268 11269 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11270 dtrace_dof_error(dof, "invalid probe section"); 11271 return (NULL); 11272 } 11273 11274 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11275 dtrace_dof_error(dof, "bad alignment in probe description"); 11276 return (NULL); 11277 } 11278 11279 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11280 dtrace_dof_error(dof, "truncated probe description"); 11281 return (NULL); 11282 } 11283 11284 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11285 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11286 11287 if (strtab == NULL) 11288 return (NULL); 11289 11290 str = daddr + strtab->dofs_offset; 11291 size = strtab->dofs_size; 11292 11293 if (probe->dofp_provider >= strtab->dofs_size) { 11294 dtrace_dof_error(dof, "corrupt probe provider"); 11295 return (NULL); 11296 } 11297 11298 (void) strncpy(desc->dtpd_provider, 11299 (char *)(str + probe->dofp_provider), 11300 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11301 11302 if (probe->dofp_mod >= strtab->dofs_size) { 11303 dtrace_dof_error(dof, "corrupt probe module"); 11304 return (NULL); 11305 } 11306 11307 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11308 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11309 11310 if (probe->dofp_func >= strtab->dofs_size) { 11311 dtrace_dof_error(dof, "corrupt probe function"); 11312 return (NULL); 11313 } 11314 11315 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11316 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11317 11318 if (probe->dofp_name >= strtab->dofs_size) { 11319 dtrace_dof_error(dof, "corrupt probe name"); 11320 return (NULL); 11321 } 11322 11323 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11324 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11325 11326 return (desc); 11327 } 11328 11329 static dtrace_difo_t * 11330 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11331 cred_t *cr) 11332 { 11333 dtrace_difo_t *dp; 11334 size_t ttl = 0; 11335 dof_difohdr_t *dofd; 11336 uintptr_t daddr = (uintptr_t)dof; 11337 size_t max = dtrace_difo_maxsize; 11338 int i, l, n; 11339 11340 static const struct { 11341 int section; 11342 int bufoffs; 11343 int lenoffs; 11344 int entsize; 11345 int align; 11346 const char *msg; 11347 } difo[] = { 11348 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11349 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11350 sizeof (dif_instr_t), "multiple DIF sections" }, 
11351 11352 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11353 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11354 sizeof (uint64_t), "multiple integer tables" }, 11355 11356 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11357 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11358 sizeof (char), "multiple string tables" }, 11359 11360 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11361 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11362 sizeof (uint_t), "multiple variable tables" }, 11363 11364 { DOF_SECT_NONE, 0, 0, 0, NULL } 11365 }; 11366 11367 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11368 dtrace_dof_error(dof, "invalid DIFO header section"); 11369 return (NULL); 11370 } 11371 11372 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11373 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11374 return (NULL); 11375 } 11376 11377 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11378 sec->dofs_size % sizeof (dof_secidx_t)) { 11379 dtrace_dof_error(dof, "bad size in DIFO header"); 11380 return (NULL); 11381 } 11382 11383 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11384 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11385 11386 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11387 dp->dtdo_rtype = dofd->dofd_rtype; 11388 11389 for (l = 0; l < n; l++) { 11390 dof_sec_t *subsec; 11391 void **bufp; 11392 uint32_t *lenp; 11393 11394 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11395 dofd->dofd_links[l])) == NULL) 11396 goto err; /* invalid section link */ 11397 11398 if (ttl + subsec->dofs_size > max) { 11399 dtrace_dof_error(dof, "exceeds maximum size"); 11400 goto err; 11401 } 11402 11403 ttl += subsec->dofs_size; 11404 11405 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11406 if (subsec->dofs_type != difo[i].section) 11407 continue; 11408 11409 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11410 dtrace_dof_error(dof, "section not loaded"); 11411 goto err; 11412 } 11413 11414 if (subsec->dofs_align != difo[i].align) { 11415 dtrace_dof_error(dof, "bad alignment"); 11416 goto err; 11417 } 11418 11419 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11420 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11421 11422 if (*bufp != NULL) { 11423 dtrace_dof_error(dof, difo[i].msg); 11424 goto err; 11425 } 11426 11427 if (difo[i].entsize != subsec->dofs_entsize) { 11428 dtrace_dof_error(dof, "entry size mismatch"); 11429 goto err; 11430 } 11431 11432 if (subsec->dofs_entsize != 0 && 11433 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11434 dtrace_dof_error(dof, "corrupt entry size"); 11435 goto err; 11436 } 11437 11438 *lenp = subsec->dofs_size; 11439 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11440 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11441 *bufp, subsec->dofs_size); 11442 11443 if (subsec->dofs_entsize != 0) 11444 *lenp /= subsec->dofs_entsize; 11445 11446 break; 11447 } 11448 11449 /* 11450 * If we encounter a loadable DIFO sub-section that is not 11451 * known to us, assume this is a broken program and fail. 11452 */ 11453 if (difo[i].section == DOF_SECT_NONE && 11454 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11455 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11456 goto err; 11457 } 11458 } 11459 11460 if (dp->dtdo_buf == NULL) { 11461 /* 11462 * We can't have a DIF object without DIF text. 
11463 */ 11464 dtrace_dof_error(dof, "missing DIF text"); 11465 goto err; 11466 } 11467 11468 /* 11469 * Before we validate the DIF object, run through the variable table 11470 * looking for the strings -- if any of their sizes are unset (zero), we'll 11471 * set their size to be the system-wide default string size. Note that 11472 * this should _not_ happen if the "strsize" option has been set -- 11473 * in this case, the compiler should have set the size to reflect the 11474 * setting of the option. 11475 */ 11476 for (i = 0; i < dp->dtdo_varlen; i++) { 11477 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 11478 dtrace_diftype_t *t = &v->dtdv_type; 11479 11480 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 11481 continue; 11482 11483 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 11484 t->dtdt_size = dtrace_strsize_default; 11485 } 11486 11487 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 11488 goto err; 11489 11490 dtrace_difo_init(dp, vstate); 11491 return (dp); 11492 11493 err: 11494 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 11495 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 11496 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 11497 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 11498 11499 kmem_free(dp, sizeof (dtrace_difo_t)); 11500 return (NULL); 11501 } 11502 11503 static dtrace_predicate_t * 11504 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11505 cred_t *cr) 11506 { 11507 dtrace_difo_t *dp; 11508 11509 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 11510 return (NULL); 11511 11512 return (dtrace_predicate_create(dp)); 11513 } 11514 11515 static dtrace_actdesc_t * 11516 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11517 cred_t *cr) 11518 { 11519 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 11520 dof_actdesc_t *desc; 11521 dof_sec_t *difosec; 11522 size_t offs; 11523 uintptr_t daddr = (uintptr_t)dof; 11524 uint64_t arg; 11525 dtrace_actkind_t kind; 11526 11527 if (sec->dofs_type != DOF_SECT_ACTDESC) { 11528 dtrace_dof_error(dof, "invalid action section"); 11529 return (NULL); 11530 } 11531 11532 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 11533 dtrace_dof_error(dof, "truncated action description"); 11534 return (NULL); 11535 } 11536 11537 if (sec->dofs_align != sizeof (uint64_t)) { 11538 dtrace_dof_error(dof, "bad alignment in action description"); 11539 return (NULL); 11540 } 11541 11542 if (sec->dofs_size < sec->dofs_entsize) { 11543 dtrace_dof_error(dof, "section entry size exceeds total size"); 11544 return (NULL); 11545 } 11546 11547 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 11548 dtrace_dof_error(dof, "bad entry size in action description"); 11549 return (NULL); 11550 } 11551 11552 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 11553 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 11554 return (NULL); 11555 } 11556 11557 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 11558 desc = (dof_actdesc_t *)(daddr + 11559 (uintptr_t)sec->dofs_offset + offs); 11560 kind = (dtrace_actkind_t)desc->dofa_kind; 11561 11562 if (DTRACEACT_ISPRINTFLIKE(kind) && 11563 (kind != DTRACEACT_PRINTA || 11564 desc->dofa_strtab != DOF_SECIDX_NONE)) { 11565 dof_sec_t *strtab; 11566 char *str, *fmt; 11567 uint64_t i; 11568 11569 /* 11570 * printf()-like actions must have a format string.
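* The string is taken from the referenced string table; it must be non-empty and NUL-terminated within the table's bounds, and a private copy is allocated to serve as the action's argument.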
11571 */ 11572 if ((strtab = dtrace_dof_sect(dof, 11573 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 11574 goto err; 11575 11576 str = (char *)((uintptr_t)dof + 11577 (uintptr_t)strtab->dofs_offset); 11578 11579 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 11580 if (str[i] == '\0') 11581 break; 11582 } 11583 11584 if (i >= strtab->dofs_size) { 11585 dtrace_dof_error(dof, "bogus format string"); 11586 goto err; 11587 } 11588 11589 if (i == desc->dofa_arg) { 11590 dtrace_dof_error(dof, "empty format string"); 11591 goto err; 11592 } 11593 11594 i -= desc->dofa_arg; 11595 fmt = kmem_alloc(i + 1, KM_SLEEP); 11596 bcopy(&str[desc->dofa_arg], fmt, i + 1); 11597 arg = (uint64_t)(uintptr_t)fmt; 11598 } else { 11599 if (kind == DTRACEACT_PRINTA) { 11600 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 11601 arg = 0; 11602 } else { 11603 arg = desc->dofa_arg; 11604 } 11605 } 11606 11607 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 11608 desc->dofa_uarg, arg); 11609 11610 if (last != NULL) { 11611 last->dtad_next = act; 11612 } else { 11613 first = act; 11614 } 11615 11616 last = act; 11617 11618 if (desc->dofa_difo == DOF_SECIDX_NONE) 11619 continue; 11620 11621 if ((difosec = dtrace_dof_sect(dof, 11622 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 11623 goto err; 11624 11625 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 11626 11627 if (act->dtad_difo == NULL) 11628 goto err; 11629 } 11630 11631 ASSERT(first != NULL); 11632 return (first); 11633 11634 err: 11635 for (act = first; act != NULL; act = next) { 11636 next = act->dtad_next; 11637 dtrace_actdesc_release(act, vstate); 11638 } 11639 11640 return (NULL); 11641 } 11642 11643 static dtrace_ecbdesc_t * 11644 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11645 cred_t *cr) 11646 { 11647 dtrace_ecbdesc_t *ep; 11648 dof_ecbdesc_t *ecb; 11649 dtrace_probedesc_t *desc; 11650 dtrace_predicate_t *pred = NULL; 11651 11652 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 11653 dtrace_dof_error(dof, "truncated ECB description"); 11654 return (NULL); 11655 } 11656 11657 if (sec->dofs_align != sizeof (uint64_t)) { 11658 dtrace_dof_error(dof, "bad alignment in ECB description"); 11659 return (NULL); 11660 } 11661 11662 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 11663 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 11664 11665 if (sec == NULL) 11666 return (NULL); 11667 11668 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11669 ep->dted_uarg = ecb->dofe_uarg; 11670 desc = &ep->dted_probe; 11671 11672 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 11673 goto err; 11674 11675 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 11676 if ((sec = dtrace_dof_sect(dof, 11677 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 11678 goto err; 11679 11680 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 11681 goto err; 11682 11683 ep->dted_pred.dtpdd_predicate = pred; 11684 } 11685 11686 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 11687 if ((sec = dtrace_dof_sect(dof, 11688 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 11689 goto err; 11690 11691 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 11692 11693 if (ep->dted_action == NULL) 11694 goto err; 11695 } 11696 11697 return (ep); 11698 11699 err: 11700 if (pred != NULL) 11701 dtrace_predicate_release(pred, vstate); 11702 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11703 return (NULL); 11704 } 11705 11706 /* 11707 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 11708 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 11709 * site of any user SETX relocations to account for load object base address. 11710 * In the future, if we need other relocations, this function can be extended. 11711 */ 11712 static int 11713 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 11714 { 11715 uintptr_t daddr = (uintptr_t)dof; 11716 dof_relohdr_t *dofr = 11717 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11718 dof_sec_t *ss, *rs, *ts; 11719 dof_relodesc_t *r; 11720 uint_t i, n; 11721 11722 if (sec->dofs_size < sizeof (dof_relohdr_t) || 11723 sec->dofs_align != sizeof (dof_secidx_t)) { 11724 dtrace_dof_error(dof, "invalid relocation header"); 11725 return (-1); 11726 } 11727 11728 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 11729 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 11730 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 11731 11732 if (ss == NULL || rs == NULL || ts == NULL) 11733 return (-1); /* dtrace_dof_error() has been called already */ 11734 11735 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 11736 rs->dofs_align != sizeof (uint64_t)) { 11737 dtrace_dof_error(dof, "invalid relocation section"); 11738 return (-1); 11739 } 11740 11741 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 11742 n = rs->dofs_size / rs->dofs_entsize; 11743 11744 for (i = 0; i < n; i++) { 11745 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 11746 11747 switch (r->dofr_type) { 11748 case DOF_RELO_NONE: 11749 break; 11750 case DOF_RELO_SETX: 11751 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 11752 sizeof (uint64_t) > ts->dofs_size) { 11753 dtrace_dof_error(dof, "bad relocation offset"); 11754 return (-1); 11755 } 11756 11757 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 11758 dtrace_dof_error(dof, "misaligned setx relo"); 11759 return (-1); 11760 } 11761 11762 *(uint64_t *)taddr += ubase; 11763 break; 11764 default: 11765 dtrace_dof_error(dof, "invalid relocation type"); 11766 return (-1); 11767 } 11768 11769 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 11770 } 11771 11772 return (0); 11773 } 11774 11775 /* 11776 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 11777 * header: it should be at the front of a memory region that is at least 11778 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 11779 * size. It need not be validated in any other way. 11780 */ 11781 static int 11782 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 11783 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 11784 { 11785 uint64_t len = dof->dofh_loadsz, seclen; 11786 uintptr_t daddr = (uintptr_t)dof; 11787 dtrace_ecbdesc_t *ep; 11788 dtrace_enabling_t *enab; 11789 uint_t i; 11790 11791 ASSERT(MUTEX_HELD(&dtrace_lock)); 11792 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 11793 11794 /* 11795 * Check the DOF header identification bytes. In addition to checking 11796 * valid settings, we also verify that unused bits/bytes are zeroed so 11797 * we can use them later without fear of regressing existing binaries. 
11798 */ 11799 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 11800 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 11801 dtrace_dof_error(dof, "DOF magic string mismatch"); 11802 return (-1); 11803 } 11804 11805 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 11806 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 11807 dtrace_dof_error(dof, "DOF has invalid data model"); 11808 return (-1); 11809 } 11810 11811 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 11812 dtrace_dof_error(dof, "DOF encoding mismatch"); 11813 return (-1); 11814 } 11815 11816 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 11817 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 11818 dtrace_dof_error(dof, "DOF version mismatch"); 11819 return (-1); 11820 } 11821 11822 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 11823 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 11824 return (-1); 11825 } 11826 11827 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 11828 dtrace_dof_error(dof, "DOF uses too many integer registers"); 11829 return (-1); 11830 } 11831 11832 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 11833 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 11834 return (-1); 11835 } 11836 11837 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 11838 if (dof->dofh_ident[i] != 0) { 11839 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 11840 return (-1); 11841 } 11842 } 11843 11844 if (dof->dofh_flags & ~DOF_FL_VALID) { 11845 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 11846 return (-1); 11847 } 11848 11849 if (dof->dofh_secsize == 0) { 11850 dtrace_dof_error(dof, "zero section header size"); 11851 return (-1); 11852 } 11853 11854 /* 11855 * Check that the section headers don't exceed the amount of DOF 11856 * data. Note that we cast the section size and number of sections 11857 * to uint64_t's to prevent possible overflow in the multiplication. 11858 */ 11859 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 11860 11861 if (dof->dofh_secoff > len || seclen > len || 11862 dof->dofh_secoff + seclen > len) { 11863 dtrace_dof_error(dof, "truncated section headers"); 11864 return (-1); 11865 } 11866 11867 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 11868 dtrace_dof_error(dof, "misaligned section headers"); 11869 return (-1); 11870 } 11871 11872 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 11873 dtrace_dof_error(dof, "misaligned section size"); 11874 return (-1); 11875 } 11876 11877 /* 11878 * Take an initial pass through the section headers to be sure that 11879 * the headers don't have stray offsets. If the 'noprobes' flag is 11880 * set, do not permit sections relating to providers, probes, or args. 
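* Each loadable section must also have a power-of-two alignment, an offset that honors that alignment, and must lie entirely within the DOF; string table sections must additionally end in a NUL byte.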
11881 */ 11882 for (i = 0; i < dof->dofh_secnum; i++) { 11883 dof_sec_t *sec = (dof_sec_t *)(daddr + 11884 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11885 11886 if (noprobes) { 11887 switch (sec->dofs_type) { 11888 case DOF_SECT_PROVIDER: 11889 case DOF_SECT_PROBES: 11890 case DOF_SECT_PRARGS: 11891 case DOF_SECT_PROFFS: 11892 dtrace_dof_error(dof, "illegal sections " 11893 "for enabling"); 11894 return (-1); 11895 } 11896 } 11897 11898 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 11899 !(sec->dofs_flags & DOF_SECF_LOAD)) { 11900 dtrace_dof_error(dof, "loadable section with load " 11901 "flag unset"); 11902 return (-1); 11903 } 11904 11905 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11906 continue; /* just ignore non-loadable sections */ 11907 11908 if (sec->dofs_align & (sec->dofs_align - 1)) { 11909 dtrace_dof_error(dof, "bad section alignment"); 11910 return (-1); 11911 } 11912 11913 if (sec->dofs_offset & (sec->dofs_align - 1)) { 11914 dtrace_dof_error(dof, "misaligned section"); 11915 return (-1); 11916 } 11917 11918 if (sec->dofs_offset > len || sec->dofs_size > len || 11919 sec->dofs_offset + sec->dofs_size > len) { 11920 dtrace_dof_error(dof, "corrupt section header"); 11921 return (-1); 11922 } 11923 11924 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 11925 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 11926 dtrace_dof_error(dof, "non-terminating string table"); 11927 return (-1); 11928 } 11929 } 11930 11931 /* 11932 * Take a second pass through the sections and locate and perform any 11933 * relocations that are present. We do this after the first pass to 11934 * be sure that all sections have had their headers validated. 11935 */ 11936 for (i = 0; i < dof->dofh_secnum; i++) { 11937 dof_sec_t *sec = (dof_sec_t *)(daddr + 11938 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11939 11940 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11941 continue; /* skip sections that are not loadable */ 11942 11943 switch (sec->dofs_type) { 11944 case DOF_SECT_URELHDR: 11945 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 11946 return (-1); 11947 break; 11948 } 11949 } 11950 11951 if ((enab = *enabp) == NULL) 11952 enab = *enabp = dtrace_enabling_create(vstate); 11953 11954 for (i = 0; i < dof->dofh_secnum; i++) { 11955 dof_sec_t *sec = (dof_sec_t *)(daddr + 11956 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11957 11958 if (sec->dofs_type != DOF_SECT_ECBDESC) 11959 continue; 11960 11961 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 11962 dtrace_enabling_destroy(enab); 11963 *enabp = NULL; 11964 return (-1); 11965 } 11966 11967 dtrace_enabling_add(enab, ep); 11968 } 11969 11970 return (0); 11971 } 11972 11973 /* 11974 * Process DOF for any options. This routine assumes that the DOF has been 11975 * at least processed by dtrace_dof_slurp(). 
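* Each option entry must not reference a string table and must not carry the value DTRACEOPT_UNSET; options are applied via dtrace_state_option(), and the first rejected option aborts processing.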
11976 */ 11977 static int 11978 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 11979 { 11980 int i, rval; 11981 uint32_t entsize; 11982 size_t offs; 11983 dof_optdesc_t *desc; 11984 11985 for (i = 0; i < dof->dofh_secnum; i++) { 11986 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 11987 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11988 11989 if (sec->dofs_type != DOF_SECT_OPTDESC) 11990 continue; 11991 11992 if (sec->dofs_align != sizeof (uint64_t)) { 11993 dtrace_dof_error(dof, "bad alignment in " 11994 "option description"); 11995 return (EINVAL); 11996 } 11997 11998 if ((entsize = sec->dofs_entsize) == 0) { 11999 dtrace_dof_error(dof, "zeroed option entry size"); 12000 return (EINVAL); 12001 } 12002 12003 if (entsize < sizeof (dof_optdesc_t)) { 12004 dtrace_dof_error(dof, "bad option entry size"); 12005 return (EINVAL); 12006 } 12007 12008 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12009 desc = (dof_optdesc_t *)((uintptr_t)dof + 12010 (uintptr_t)sec->dofs_offset + offs); 12011 12012 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12013 dtrace_dof_error(dof, "non-zero option string"); 12014 return (EINVAL); 12015 } 12016 12017 if (desc->dofo_value == DTRACEOPT_UNSET) { 12018 dtrace_dof_error(dof, "unset option"); 12019 return (EINVAL); 12020 } 12021 12022 if ((rval = dtrace_state_option(state, 12023 desc->dofo_option, desc->dofo_value)) != 0) { 12024 dtrace_dof_error(dof, "rejected option"); 12025 return (rval); 12026 } 12027 } 12028 } 12029 12030 return (0); 12031 } 12032 12033 /* 12034 * DTrace Consumer State Functions 12035 */ 12036 int 12037 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12038 { 12039 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12040 void *base; 12041 uintptr_t limit; 12042 dtrace_dynvar_t *dvar, *next, *start; 12043 int i; 12044 12045 ASSERT(MUTEX_HELD(&dtrace_lock)); 12046 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12047 12048 bzero(dstate, sizeof (dtrace_dstate_t)); 12049 12050 if ((dstate->dtds_chunksize = chunksize) == 0) 12051 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12052 12053 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12054 size = min; 12055 12056 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12057 return (ENOMEM); 12058 12059 dstate->dtds_size = size; 12060 dstate->dtds_base = base; 12061 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12062 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12063 12064 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12065 12066 if (hashsize != 1 && (hashsize & 1)) 12067 hashsize--; 12068 12069 dstate->dtds_hashsize = hashsize; 12070 dstate->dtds_hash = dstate->dtds_base; 12071 12072 /* 12073 * Set all of our hash buckets to point to the single sink, and (if 12074 * it hasn't already been set), set the sink's hash value to be the 12075 * sink sentinel value. The sink is needed for dynamic variable 12076 * lookups to know that they have iterated over an entire, valid hash 12077 * chain. 12078 */ 12079 for (i = 0; i < hashsize; i++) 12080 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12081 12082 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12083 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12084 12085 /* 12086 * Determine number of active CPUs. Divide free list evenly among 12087 * active CPUs. 
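* The per-CPU share (maxper) is the space remaining after the hash table, divided by NCPU and rounded down to a multiple of the chunk size. As a purely illustrative example, with roughly 960K of space left after the hash table, 64 CPUs and a 256-byte chunk size, each CPU's free list would receive 15K worth of chunks. If the share rounds down to zero, the entire free list is given to the first CPU.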
12088 */ 12089 start = (dtrace_dynvar_t *) 12090 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12091 limit = (uintptr_t)base + size; 12092 12093 maxper = (limit - (uintptr_t)start) / NCPU; 12094 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12095 12096 for (i = 0; i < NCPU; i++) { 12097 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12098 12099 /* 12100 * If we don't even have enough chunks to make it once through 12101 * NCPUs, we're just going to allocate everything to the first 12102 * CPU. And if we're on the last CPU, we're going to allocate 12103 * whatever is left over. In either case, we set the limit to 12104 * be the limit of the dynamic variable space. 12105 */ 12106 if (maxper == 0 || i == NCPU - 1) { 12107 limit = (uintptr_t)base + size; 12108 start = NULL; 12109 } else { 12110 limit = (uintptr_t)start + maxper; 12111 start = (dtrace_dynvar_t *)limit; 12112 } 12113 12114 ASSERT(limit <= (uintptr_t)base + size); 12115 12116 for (;;) { 12117 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12118 dstate->dtds_chunksize); 12119 12120 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12121 break; 12122 12123 dvar->dtdv_next = next; 12124 dvar = next; 12125 } 12126 12127 if (maxper == 0) 12128 break; 12129 } 12130 12131 return (0); 12132 } 12133 12134 void 12135 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12136 { 12137 ASSERT(MUTEX_HELD(&cpu_lock)); 12138 12139 if (dstate->dtds_base == NULL) 12140 return; 12141 12142 kmem_free(dstate->dtds_base, dstate->dtds_size); 12143 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12144 } 12145 12146 static void 12147 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12148 { 12149 /* 12150 * Logical XOR, where are you? 12151 */ 12152 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12153 12154 if (vstate->dtvs_nglobals > 0) { 12155 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12156 sizeof (dtrace_statvar_t *)); 12157 } 12158 12159 if (vstate->dtvs_ntlocals > 0) { 12160 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12161 sizeof (dtrace_difv_t)); 12162 } 12163 12164 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12165 12166 if (vstate->dtvs_nlocals > 0) { 12167 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12168 sizeof (dtrace_statvar_t *)); 12169 } 12170 } 12171 12172 static void 12173 dtrace_state_clean(dtrace_state_t *state) 12174 { 12175 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12176 return; 12177 12178 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12179 dtrace_speculation_clean(state); 12180 } 12181 12182 static void 12183 dtrace_state_deadman(dtrace_state_t *state) 12184 { 12185 hrtime_t now; 12186 12187 dtrace_sync(); 12188 12189 now = dtrace_gethrtime(); 12190 12191 if (state != dtrace_anon.dta_state && 12192 now - state->dts_laststatus >= dtrace_deadman_user) 12193 return; 12194 12195 /* 12196 * We must be sure that dts_alive never appears to be less than the 12197 * value upon entry to dtrace_state_deadman(), and because we lack a 12198 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12199 * store INT64_MAX to it, followed by a memory barrier, followed by 12200 * the new value. This assures that dts_alive never appears to be 12201 * less than its true value, regardless of the order in which the 12202 * stores to the underlying storage are issued. 
12203 */ 12204 state->dts_alive = INT64_MAX; 12205 dtrace_membar_producer(); 12206 state->dts_alive = now; 12207 } 12208 12209 dtrace_state_t * 12210 dtrace_state_create(dev_t *devp, cred_t *cr) 12211 { 12212 minor_t minor; 12213 major_t major; 12214 char c[30]; 12215 dtrace_state_t *state; 12216 dtrace_optval_t *opt; 12217 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12218 12219 ASSERT(MUTEX_HELD(&dtrace_lock)); 12220 ASSERT(MUTEX_HELD(&cpu_lock)); 12221 12222 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12223 VM_BESTFIT | VM_SLEEP); 12224 12225 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12226 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12227 return (NULL); 12228 } 12229 12230 state = ddi_get_soft_state(dtrace_softstate, minor); 12231 state->dts_epid = DTRACE_EPIDNONE + 1; 12232 12233 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 12234 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12235 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12236 12237 if (devp != NULL) { 12238 major = getemajor(*devp); 12239 } else { 12240 major = ddi_driver_major(dtrace_devi); 12241 } 12242 12243 state->dts_dev = makedevice(major, minor); 12244 12245 if (devp != NULL) 12246 *devp = state->dts_dev; 12247 12248 /* 12249 * We allocate NCPU buffers. On the one hand, this can be quite 12250 * a bit of memory per instance (nearly 36K on a Starcat). On the 12251 * other hand, it saves an additional memory reference in the probe 12252 * path. 12253 */ 12254 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12255 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12256 state->dts_cleaner = CYCLIC_NONE; 12257 state->dts_deadman = CYCLIC_NONE; 12258 state->dts_vstate.dtvs_state = state; 12259 12260 for (i = 0; i < DTRACEOPT_MAX; i++) 12261 state->dts_options[i] = DTRACEOPT_UNSET; 12262 12263 /* 12264 * Set the default options. 12265 */ 12266 opt = state->dts_options; 12267 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12268 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12269 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12270 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12271 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12272 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12273 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12274 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12275 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12276 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12277 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12278 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12279 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12280 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12281 12282 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12283 12284 /* 12285 * Depending on the user credentials, we set flag bits which alter probe 12286 * visibility or the amount of destructiveness allowed. In the case of 12287 * actual anonymous tracing, or the possession of all privileges, all of 12288 * the normal checks are bypassed. 12289 */ 12290 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12291 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12292 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12293 } else { 12294 /* 12295 * Set up the credentials for this instantiation. 
We take a 12296 * hold on the credential to prevent it from disappearing on 12297 * us; this in turn prevents the zone_t referenced by this 12298 * credential from disappearing. This means that we can 12299 * examine the credential and the zone from probe context. 12300 */ 12301 crhold(cr); 12302 state->dts_cred.dcr_cred = cr; 12303 12304 /* 12305 * CRA_PROC means "we have *some* privilege for dtrace" and 12306 * unlocks the use of variables like pid, zonename, etc. 12307 */ 12308 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12309 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12310 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12311 } 12312 12313 /* 12314 * dtrace_user allows use of syscall and profile providers. 12315 * If the user also has proc_owner and/or proc_zone, we 12316 * extend the scope to include additional visibility and 12317 * destructive power. 12318 */ 12319 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12320 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12321 state->dts_cred.dcr_visible |= 12322 DTRACE_CRV_ALLPROC; 12323 12324 state->dts_cred.dcr_action |= 12325 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12326 } 12327 12328 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12329 state->dts_cred.dcr_visible |= 12330 DTRACE_CRV_ALLZONE; 12331 12332 state->dts_cred.dcr_action |= 12333 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12334 } 12335 12336 /* 12337 * If we have all privs in whatever zone this is, 12338 * we can do destructive things to processes which 12339 * have altered credentials. 12340 */ 12341 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12342 cr->cr_zone->zone_privset)) { 12343 state->dts_cred.dcr_action |= 12344 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12345 } 12346 } 12347 12348 /* 12349 * Holding the dtrace_kernel privilege also implies that 12350 * the user has the dtrace_user privilege from a visibility 12351 * perspective. But without further privileges, some 12352 * destructive actions are not available. 12353 */ 12354 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12355 /* 12356 * Make all probes in all zones visible. However, 12357 * this doesn't mean that all actions become available 12358 * to all zones. 12359 */ 12360 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12361 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12362 12363 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12364 DTRACE_CRA_PROC; 12365 /* 12366 * Holding proc_owner means that destructive actions 12367 * for *this* zone are allowed. 12368 */ 12369 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12370 state->dts_cred.dcr_action |= 12371 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12372 12373 /* 12374 * Holding proc_zone means that destructive actions 12375 * for this user/group ID in all zones is allowed. 12376 */ 12377 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12378 state->dts_cred.dcr_action |= 12379 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12380 12381 /* 12382 * If we have all privs in whatever zone this is, 12383 * we can do destructive things to processes which 12384 * have altered credentials. 12385 */ 12386 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12387 cr->cr_zone->zone_privset)) { 12388 state->dts_cred.dcr_action |= 12389 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12390 } 12391 } 12392 12393 /* 12394 * Holding the dtrace_proc privilege gives control over fasttrap 12395 * and pid providers. We need to grant wider destructive 12396 * privileges in the event that the user has proc_owner and/or 12397 * proc_zone. 
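 *
 * As a rough illustration (the exact policy is defined by the checks
 * below, not by this comment): a consumer whose credential carries only
 * dtrace_proc may use the pid and fasttrap providers against processes
 * it could already control; adding proc_owner -- for example via
 *
 *	# ppriv -s A+dtrace_proc,proc_owner <pid>
 *
 * -- additionally permits process-destructive actions against other
 * users' processes, and adding proc_zone extends those destructive
 * actions across zones.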
12398 */ 12399 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12400 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12401 state->dts_cred.dcr_action |= 12402 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12403 12404 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12405 state->dts_cred.dcr_action |= 12406 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12407 } 12408 } 12409 12410 return (state); 12411 } 12412 12413 static int 12414 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 12415 { 12416 dtrace_optval_t *opt = state->dts_options, size; 12417 processorid_t cpu; 12418 int flags = 0, rval; 12419 12420 ASSERT(MUTEX_HELD(&dtrace_lock)); 12421 ASSERT(MUTEX_HELD(&cpu_lock)); 12422 ASSERT(which < DTRACEOPT_MAX); 12423 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 12424 (state == dtrace_anon.dta_state && 12425 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 12426 12427 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 12428 return (0); 12429 12430 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 12431 cpu = opt[DTRACEOPT_CPU]; 12432 12433 if (which == DTRACEOPT_SPECSIZE) 12434 flags |= DTRACEBUF_NOSWITCH; 12435 12436 if (which == DTRACEOPT_BUFSIZE) { 12437 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 12438 flags |= DTRACEBUF_RING; 12439 12440 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 12441 flags |= DTRACEBUF_FILL; 12442 12443 if (state != dtrace_anon.dta_state || 12444 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12445 flags |= DTRACEBUF_INACTIVE; 12446 } 12447 12448 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 12449 /* 12450 * The size must be 8-byte aligned. If the size is not 8-byte 12451 * aligned, drop it down by the difference. 12452 */ 12453 if (size & (sizeof (uint64_t) - 1)) 12454 size -= size & (sizeof (uint64_t) - 1); 12455 12456 if (size < state->dts_reserve) { 12457 /* 12458 * Buffers always must be large enough to accommodate 12459 * their prereserved space. We return E2BIG instead 12460 * of ENOMEM in this case to allow for user-level 12461 * software to differentiate the cases. 12462 */ 12463 return (E2BIG); 12464 } 12465 12466 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 12467 12468 if (rval != ENOMEM) { 12469 opt[which] = size; 12470 return (rval); 12471 } 12472 12473 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12474 return (rval); 12475 } 12476 12477 return (ENOMEM); 12478 } 12479 12480 static int 12481 dtrace_state_buffers(dtrace_state_t *state) 12482 { 12483 dtrace_speculation_t *spec = state->dts_speculations; 12484 int rval, i; 12485 12486 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 12487 DTRACEOPT_BUFSIZE)) != 0) 12488 return (rval); 12489 12490 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 12491 DTRACEOPT_AGGSIZE)) != 0) 12492 return (rval); 12493 12494 for (i = 0; i < state->dts_nspeculations; i++) { 12495 if ((rval = dtrace_state_buffer(state, 12496 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 12497 return (rval); 12498 } 12499 12500 return (0); 12501 } 12502 12503 static void 12504 dtrace_state_prereserve(dtrace_state_t *state) 12505 { 12506 dtrace_ecb_t *ecb; 12507 dtrace_probe_t *probe; 12508 12509 state->dts_reserve = 0; 12510 12511 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 12512 return; 12513 12514 /* 12515 * If our buffer policy is a "fill" buffer policy, we need to set the 12516 * prereserved space to be the space required by the END probes. 
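 *
 * This matters because a "fill" buffer stops accepting records once it
 * is full; without the reservation, a consumer such as
 *
 *	# dtrace -x bufpolicy=fill \
 *	    -n 'syscall:::entry { trace(execname); }' \
 *	    -n 'END { trace("done"); }'
 *
 * could fill its principal buffer before END fires and lose the END
 * records entirely.  The loop below therefore sums the space needed by
 * every ECB of this state that is attached to the END probe.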
12517 */ 12518 probe = dtrace_probes[dtrace_probeid_end - 1]; 12519 ASSERT(probe != NULL); 12520 12521 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 12522 if (ecb->dte_state != state) 12523 continue; 12524 12525 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 12526 } 12527 } 12528 12529 static int 12530 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 12531 { 12532 dtrace_optval_t *opt = state->dts_options, sz, nspec; 12533 dtrace_speculation_t *spec; 12534 dtrace_buffer_t *buf; 12535 cyc_handler_t hdlr; 12536 cyc_time_t when; 12537 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12538 dtrace_icookie_t cookie; 12539 12540 mutex_enter(&cpu_lock); 12541 mutex_enter(&dtrace_lock); 12542 12543 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 12544 rval = EBUSY; 12545 goto out; 12546 } 12547 12548 /* 12549 * Before we can perform any checks, we must prime all of the 12550 * retained enablings that correspond to this state. 12551 */ 12552 dtrace_enabling_prime(state); 12553 12554 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 12555 rval = EACCES; 12556 goto out; 12557 } 12558 12559 dtrace_state_prereserve(state); 12560 12561 /* 12562 * Now we want to do is try to allocate our speculations. 12563 * We do not automatically resize the number of speculations; if 12564 * this fails, we will fail the operation. 12565 */ 12566 nspec = opt[DTRACEOPT_NSPEC]; 12567 ASSERT(nspec != DTRACEOPT_UNSET); 12568 12569 if (nspec > INT_MAX) { 12570 rval = ENOMEM; 12571 goto out; 12572 } 12573 12574 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 12575 12576 if (spec == NULL) { 12577 rval = ENOMEM; 12578 goto out; 12579 } 12580 12581 state->dts_speculations = spec; 12582 state->dts_nspeculations = (int)nspec; 12583 12584 for (i = 0; i < nspec; i++) { 12585 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 12586 rval = ENOMEM; 12587 goto err; 12588 } 12589 12590 spec[i].dtsp_buffer = buf; 12591 } 12592 12593 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 12594 if (dtrace_anon.dta_state == NULL) { 12595 rval = ENOENT; 12596 goto out; 12597 } 12598 12599 if (state->dts_necbs != 0) { 12600 rval = EALREADY; 12601 goto out; 12602 } 12603 12604 state->dts_anon = dtrace_anon_grab(); 12605 ASSERT(state->dts_anon != NULL); 12606 state = state->dts_anon; 12607 12608 /* 12609 * We want "grabanon" to be set in the grabbed state, so we'll 12610 * copy that option value from the grabbing state into the 12611 * grabbed state. 12612 */ 12613 state->dts_options[DTRACEOPT_GRABANON] = 12614 opt[DTRACEOPT_GRABANON]; 12615 12616 *cpu = dtrace_anon.dta_beganon; 12617 12618 /* 12619 * If the anonymous state is active (as it almost certainly 12620 * is if the anonymous enabling ultimately matched anything), 12621 * we don't allow any further option processing -- but we 12622 * don't return failure. 12623 */ 12624 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12625 goto out; 12626 } 12627 12628 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 12629 opt[DTRACEOPT_AGGSIZE] != 0) { 12630 if (state->dts_aggregations == NULL) { 12631 /* 12632 * We're not going to create an aggregation buffer 12633 * because we don't have any ECBs that contain 12634 * aggregations -- set this option to 0. 12635 */ 12636 opt[DTRACEOPT_AGGSIZE] = 0; 12637 } else { 12638 /* 12639 * If we have an aggregation buffer, we must also have 12640 * a buffer to use as scratch. 
12641 */ 12642 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 12643 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 12644 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 12645 } 12646 } 12647 } 12648 12649 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 12650 opt[DTRACEOPT_SPECSIZE] != 0) { 12651 if (!state->dts_speculates) { 12652 /* 12653 * We're not going to create speculation buffers 12654 * because we don't have any ECBs that actually 12655 * speculate -- set the speculation size to 0. 12656 */ 12657 opt[DTRACEOPT_SPECSIZE] = 0; 12658 } 12659 } 12660 12661 /* 12662 * The bare minimum size for any buffer that we're actually going to 12663 * do anything to is sizeof (uint64_t). 12664 */ 12665 sz = sizeof (uint64_t); 12666 12667 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 12668 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 12669 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 12670 /* 12671 * A buffer size has been explicitly set to 0 (or to a size 12672 * that will be adjusted to 0) and we need the space -- we 12673 * need to return failure. We return ENOSPC to differentiate 12674 * it from failing to allocate a buffer due to failure to meet 12675 * the reserve (for which we return E2BIG). 12676 */ 12677 rval = ENOSPC; 12678 goto out; 12679 } 12680 12681 if ((rval = dtrace_state_buffers(state)) != 0) 12682 goto err; 12683 12684 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 12685 sz = dtrace_dstate_defsize; 12686 12687 do { 12688 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 12689 12690 if (rval == 0) 12691 break; 12692 12693 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12694 goto err; 12695 } while (sz >>= 1); 12696 12697 opt[DTRACEOPT_DYNVARSIZE] = sz; 12698 12699 if (rval != 0) 12700 goto err; 12701 12702 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 12703 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 12704 12705 if (opt[DTRACEOPT_CLEANRATE] == 0) 12706 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12707 12708 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 12709 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 12710 12711 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 12712 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12713 12714 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 12715 hdlr.cyh_arg = state; 12716 hdlr.cyh_level = CY_LOW_LEVEL; 12717 12718 when.cyt_when = 0; 12719 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 12720 12721 state->dts_cleaner = cyclic_add(&hdlr, &when); 12722 12723 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 12724 hdlr.cyh_arg = state; 12725 hdlr.cyh_level = CY_LOW_LEVEL; 12726 12727 when.cyt_when = 0; 12728 when.cyt_interval = dtrace_deadman_interval; 12729 12730 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 12731 state->dts_deadman = cyclic_add(&hdlr, &when); 12732 12733 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 12734 12735 /* 12736 * Now it's time to actually fire the BEGIN probe. We need to disable 12737 * interrupts here both to record the CPU on which we fired the BEGIN 12738 * probe (the data from this CPU will be processed first at user 12739 * level) and to manually activate the buffer for this CPU. 
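 *
 * Processing the BEGIN CPU's buffer first is what allows output from a
 * BEGIN clause to precede other principal-buffer output; for instance,
 * in
 *
 *	BEGIN { printf("%-16s %s\n", "EXECNAME", "PROBE"); }
 *	syscall:::entry { printf("%-16s %s\n", execname, probefunc); }
 *
 * the header line lands in this CPU's principal buffer, which the
 * consumer processes ahead of all others.  Disabling interrupts keeps
 * us on a single CPU, so the CPU we report in *cpu is in fact the CPU
 * whose buffer received the BEGIN records.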
12740 */ 12741 cookie = dtrace_interrupt_disable(); 12742 *cpu = CPU->cpu_id; 12743 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 12744 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12745 12746 dtrace_probe(dtrace_probeid_begin, 12747 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12748 dtrace_interrupt_enable(cookie); 12749 /* 12750 * We may have had an exit action from a BEGIN probe; only change our 12751 * state to ACTIVE if we're still in WARMUP. 12752 */ 12753 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 12754 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 12755 12756 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 12757 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 12758 12759 /* 12760 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 12761 * want each CPU to transition its principal buffer out of the 12762 * INACTIVE state. Doing this assures that no CPU will suddenly begin 12763 * processing an ECB halfway down a probe's ECB chain; all CPUs will 12764 * atomically transition from processing none of a state's ECBs to 12765 * processing all of them. 12766 */ 12767 dtrace_xcall(DTRACE_CPUALL, 12768 (dtrace_xcall_t)dtrace_buffer_activate, state); 12769 goto out; 12770 12771 err: 12772 dtrace_buffer_free(state->dts_buffer); 12773 dtrace_buffer_free(state->dts_aggbuffer); 12774 12775 if ((nspec = state->dts_nspeculations) == 0) { 12776 ASSERT(state->dts_speculations == NULL); 12777 goto out; 12778 } 12779 12780 spec = state->dts_speculations; 12781 ASSERT(spec != NULL); 12782 12783 for (i = 0; i < state->dts_nspeculations; i++) { 12784 if ((buf = spec[i].dtsp_buffer) == NULL) 12785 break; 12786 12787 dtrace_buffer_free(buf); 12788 kmem_free(buf, bufsize); 12789 } 12790 12791 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12792 state->dts_nspeculations = 0; 12793 state->dts_speculations = NULL; 12794 12795 out: 12796 mutex_exit(&dtrace_lock); 12797 mutex_exit(&cpu_lock); 12798 12799 return (rval); 12800 } 12801 12802 static int 12803 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 12804 { 12805 dtrace_icookie_t cookie; 12806 12807 ASSERT(MUTEX_HELD(&dtrace_lock)); 12808 12809 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 12810 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 12811 return (EINVAL); 12812 12813 /* 12814 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 12815 * to be sure that every CPU has seen it. See below for the details 12816 * on why this is done. 12817 */ 12818 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 12819 dtrace_sync(); 12820 12821 /* 12822 * By this point, it is impossible for any CPU to be still processing 12823 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 12824 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 12825 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 12826 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 12827 * iff we're in the END probe. 12828 */ 12829 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 12830 dtrace_sync(); 12831 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 12832 12833 /* 12834 * Finally, we can release the reserve and call the END probe. We 12835 * disable interrupts across calling the END probe to allow us to 12836 * return the CPU on which we actually called the END probe. This 12837 * allows user-land to be sure that this CPU's principal buffer is 12838 * processed last. 
12839 */ 12840 state->dts_reserve = 0; 12841 12842 cookie = dtrace_interrupt_disable(); 12843 *cpu = CPU->cpu_id; 12844 dtrace_probe(dtrace_probeid_end, 12845 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12846 dtrace_interrupt_enable(cookie); 12847 12848 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 12849 dtrace_sync(); 12850 12851 return (0); 12852 } 12853 12854 static int 12855 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 12856 dtrace_optval_t val) 12857 { 12858 ASSERT(MUTEX_HELD(&dtrace_lock)); 12859 12860 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12861 return (EBUSY); 12862 12863 if (option >= DTRACEOPT_MAX) 12864 return (EINVAL); 12865 12866 if (option != DTRACEOPT_CPU && val < 0) 12867 return (EINVAL); 12868 12869 switch (option) { 12870 case DTRACEOPT_DESTRUCTIVE: 12871 if (dtrace_destructive_disallow) 12872 return (EACCES); 12873 12874 state->dts_cred.dcr_destructive = 1; 12875 break; 12876 12877 case DTRACEOPT_BUFSIZE: 12878 case DTRACEOPT_DYNVARSIZE: 12879 case DTRACEOPT_AGGSIZE: 12880 case DTRACEOPT_SPECSIZE: 12881 case DTRACEOPT_STRSIZE: 12882 if (val < 0) 12883 return (EINVAL); 12884 12885 if (val >= LONG_MAX) { 12886 /* 12887 * If this is an otherwise negative value, set it to 12888 * the highest multiple of 128m less than LONG_MAX. 12889 * Technically, we're adjusting the size without 12890 * regard to the buffer resizing policy, but in fact, 12891 * this has no effect -- if we set the buffer size to 12892 * ~LONG_MAX and the buffer policy is ultimately set to 12893 * be "manual", the buffer allocation is guaranteed to 12894 * fail, if only because the allocation requires two 12895 * buffers. (We set the the size to the highest 12896 * multiple of 128m because it ensures that the size 12897 * will remain a multiple of a megabyte when 12898 * repeatedly halved -- all the way down to 15m.) 12899 */ 12900 val = LONG_MAX - (1 << 27) + 1; 12901 } 12902 } 12903 12904 state->dts_options[option] = val; 12905 12906 return (0); 12907 } 12908 12909 static void 12910 dtrace_state_destroy(dtrace_state_t *state) 12911 { 12912 dtrace_ecb_t *ecb; 12913 dtrace_vstate_t *vstate = &state->dts_vstate; 12914 minor_t minor = getminor(state->dts_dev); 12915 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12916 dtrace_speculation_t *spec = state->dts_speculations; 12917 int nspec = state->dts_nspeculations; 12918 uint32_t match; 12919 12920 ASSERT(MUTEX_HELD(&dtrace_lock)); 12921 ASSERT(MUTEX_HELD(&cpu_lock)); 12922 12923 /* 12924 * First, retract any retained enablings for this state. 12925 */ 12926 dtrace_enabling_retract(state); 12927 ASSERT(state->dts_nretained == 0); 12928 12929 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 12930 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 12931 /* 12932 * We have managed to come into dtrace_state_destroy() on a 12933 * hot enabling -- almost certainly because of a disorderly 12934 * shutdown of a consumer. (That is, a consumer that is 12935 * exiting without having called dtrace_stop().) In this case, 12936 * we're going to set our activity to be KILLED, and then 12937 * issue a sync to be sure that everyone is out of probe 12938 * context before we start blowing away ECBs. 12939 */ 12940 state->dts_activity = DTRACE_ACTIVITY_KILLED; 12941 dtrace_sync(); 12942 } 12943 12944 /* 12945 * Release the credential hold we took in dtrace_state_create(). 
12946 */ 12947 if (state->dts_cred.dcr_cred != NULL) 12948 crfree(state->dts_cred.dcr_cred); 12949 12950 /* 12951 * Now we can safely disable and destroy any enabled probes. Because 12952 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 12953 * (especially if they're all enabled), we take two passes through the 12954 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 12955 * in the second we disable whatever is left over. 12956 */ 12957 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 12958 for (i = 0; i < state->dts_necbs; i++) { 12959 if ((ecb = state->dts_ecbs[i]) == NULL) 12960 continue; 12961 12962 if (match && ecb->dte_probe != NULL) { 12963 dtrace_probe_t *probe = ecb->dte_probe; 12964 dtrace_provider_t *prov = probe->dtpr_provider; 12965 12966 if (!(prov->dtpv_priv.dtpp_flags & match)) 12967 continue; 12968 } 12969 12970 dtrace_ecb_disable(ecb); 12971 dtrace_ecb_destroy(ecb); 12972 } 12973 12974 if (!match) 12975 break; 12976 } 12977 12978 /* 12979 * Before we free the buffers, perform one more sync to assure that 12980 * every CPU is out of probe context. 12981 */ 12982 dtrace_sync(); 12983 12984 dtrace_buffer_free(state->dts_buffer); 12985 dtrace_buffer_free(state->dts_aggbuffer); 12986 12987 for (i = 0; i < nspec; i++) 12988 dtrace_buffer_free(spec[i].dtsp_buffer); 12989 12990 if (state->dts_cleaner != CYCLIC_NONE) 12991 cyclic_remove(state->dts_cleaner); 12992 12993 if (state->dts_deadman != CYCLIC_NONE) 12994 cyclic_remove(state->dts_deadman); 12995 12996 dtrace_dstate_fini(&vstate->dtvs_dynvars); 12997 dtrace_vstate_fini(vstate); 12998 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 12999 13000 if (state->dts_aggregations != NULL) { 13001 #ifdef DEBUG 13002 for (i = 0; i < state->dts_naggregations; i++) 13003 ASSERT(state->dts_aggregations[i] == NULL); 13004 #endif 13005 ASSERT(state->dts_naggregations > 0); 13006 kmem_free(state->dts_aggregations, 13007 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13008 } 13009 13010 kmem_free(state->dts_buffer, bufsize); 13011 kmem_free(state->dts_aggbuffer, bufsize); 13012 13013 for (i = 0; i < nspec; i++) 13014 kmem_free(spec[i].dtsp_buffer, bufsize); 13015 13016 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13017 13018 dtrace_format_destroy(state); 13019 13020 vmem_destroy(state->dts_aggid_arena); 13021 ddi_soft_state_free(dtrace_softstate, minor); 13022 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13023 } 13024 13025 /* 13026 * DTrace Anonymous Enabling Functions 13027 */ 13028 static dtrace_state_t * 13029 dtrace_anon_grab(void) 13030 { 13031 dtrace_state_t *state; 13032 13033 ASSERT(MUTEX_HELD(&dtrace_lock)); 13034 13035 if ((state = dtrace_anon.dta_state) == NULL) { 13036 ASSERT(dtrace_anon.dta_enabling == NULL); 13037 return (NULL); 13038 } 13039 13040 ASSERT(dtrace_anon.dta_enabling != NULL); 13041 ASSERT(dtrace_retained != NULL); 13042 13043 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13044 dtrace_anon.dta_enabling = NULL; 13045 dtrace_anon.dta_state = NULL; 13046 13047 return (state); 13048 } 13049 13050 static void 13051 dtrace_anon_property(void) 13052 { 13053 int i, rv; 13054 dtrace_state_t *state; 13055 dof_hdr_t *dof; 13056 char c[32]; /* enough for "dof-data-" + digits */ 13057 13058 ASSERT(MUTEX_HELD(&dtrace_lock)); 13059 ASSERT(MUTEX_HELD(&cpu_lock)); 13060 13061 for (i = 0; ; i++) { 13062 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13063 13064 dtrace_err_verbose = 1; 13065 13066 if ((dof = dtrace_dof_property(c)) 
== NULL) { 13067 dtrace_err_verbose = 0; 13068 break; 13069 } 13070 13071 /* 13072 * We want to create anonymous state, so we need to transition 13073 * the kernel debugger to indicate that DTrace is active. If 13074 * this fails (e.g. because the debugger has modified text in 13075 * some way), we won't continue with the processing. 13076 */ 13077 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13078 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13079 "enabling ignored."); 13080 dtrace_dof_destroy(dof); 13081 break; 13082 } 13083 13084 /* 13085 * If we haven't allocated an anonymous state, we'll do so now. 13086 */ 13087 if ((state = dtrace_anon.dta_state) == NULL) { 13088 state = dtrace_state_create(NULL, NULL); 13089 dtrace_anon.dta_state = state; 13090 13091 if (state == NULL) { 13092 /* 13093 * This basically shouldn't happen: the only 13094 * failure mode from dtrace_state_create() is a 13095 * failure of ddi_soft_state_zalloc() that 13096 * itself should never happen. Still, the 13097 * interface allows for a failure mode, and 13098 * we want to fail as gracefully as possible: 13099 * we'll emit an error message and cease 13100 * processing anonymous state in this case. 13101 */ 13102 cmn_err(CE_WARN, "failed to create " 13103 "anonymous state"); 13104 dtrace_dof_destroy(dof); 13105 break; 13106 } 13107 } 13108 13109 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13110 &dtrace_anon.dta_enabling, 0, B_TRUE); 13111 13112 if (rv == 0) 13113 rv = dtrace_dof_options(dof, state); 13114 13115 dtrace_err_verbose = 0; 13116 dtrace_dof_destroy(dof); 13117 13118 if (rv != 0) { 13119 /* 13120 * This is malformed DOF; chuck any anonymous state 13121 * that we created. 13122 */ 13123 ASSERT(dtrace_anon.dta_enabling == NULL); 13124 dtrace_state_destroy(state); 13125 dtrace_anon.dta_state = NULL; 13126 break; 13127 } 13128 13129 ASSERT(dtrace_anon.dta_enabling != NULL); 13130 } 13131 13132 if (dtrace_anon.dta_enabling != NULL) { 13133 int rval; 13134 13135 /* 13136 * dtrace_enabling_retain() can only fail because we are 13137 * trying to retain more enablings than are allowed -- but 13138 * we only have one anonymous enabling, and we are guaranteed 13139 * to be allowed at least one retained enabling; we assert 13140 * that dtrace_enabling_retain() returns success. 13141 */ 13142 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13143 ASSERT(rval == 0); 13144 13145 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13146 } 13147 } 13148 13149 /* 13150 * DTrace Helper Functions 13151 */ 13152 static void 13153 dtrace_helper_trace(dtrace_helper_action_t *helper, 13154 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13155 { 13156 uint32_t size, next, nnext, i; 13157 dtrace_helptrace_t *ent; 13158 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13159 13160 if (!dtrace_helptrace_enabled) 13161 return; 13162 13163 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13164 13165 /* 13166 * What would a tracing framework be without its own tracing 13167 * framework? (Well, a hell of a lot simpler, for starters...) 13168 */ 13169 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13170 sizeof (uint64_t) - sizeof (uint64_t); 13171 13172 /* 13173 * Iterate until we can allocate a slot in the trace buffer. 
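 *
 * The allocation is a lock-free bump of dtrace_helptrace_next with
 * wrap-around.  For example (sizes purely illustrative): with a
 * 65536-byte buffer and a 200-byte entry, a CPU reading next = 65400
 * finds that 65400 + 200 is not less than 65536, so it proposes
 * nnext = 200; if its compare-and-swap wins, the nnext == size test
 * below resets next to 0 and the entry is written at the start of the
 * buffer.  A CPU that loses the compare-and-swap simply retries with
 * the updated value.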
13174 */ 13175 do { 13176 next = dtrace_helptrace_next; 13177 13178 if (next + size < dtrace_helptrace_bufsize) { 13179 nnext = next + size; 13180 } else { 13181 nnext = size; 13182 } 13183 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13184 13185 /* 13186 * We have our slot; fill it in. 13187 */ 13188 if (nnext == size) 13189 next = 0; 13190 13191 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13192 ent->dtht_helper = helper; 13193 ent->dtht_where = where; 13194 ent->dtht_nlocals = vstate->dtvs_nlocals; 13195 13196 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13197 mstate->dtms_fltoffs : -1; 13198 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13199 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 13200 13201 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13202 dtrace_statvar_t *svar; 13203 13204 if ((svar = vstate->dtvs_locals[i]) == NULL) 13205 continue; 13206 13207 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13208 ent->dtht_locals[i] = 13209 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 13210 } 13211 } 13212 13213 static uint64_t 13214 dtrace_helper(int which, dtrace_mstate_t *mstate, 13215 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13216 { 13217 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13218 uint64_t sarg0 = mstate->dtms_arg[0]; 13219 uint64_t sarg1 = mstate->dtms_arg[1]; 13220 uint64_t rval; 13221 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13222 dtrace_helper_action_t *helper; 13223 dtrace_vstate_t *vstate; 13224 dtrace_difo_t *pred; 13225 int i, trace = dtrace_helptrace_enabled; 13226 13227 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13228 13229 if (helpers == NULL) 13230 return (0); 13231 13232 if ((helper = helpers->dthps_actions[which]) == NULL) 13233 return (0); 13234 13235 vstate = &helpers->dthps_vstate; 13236 mstate->dtms_arg[0] = arg0; 13237 mstate->dtms_arg[1] = arg1; 13238 13239 /* 13240 * Now iterate over each helper. If its predicate evaluates to 'true', 13241 * we'll call the corresponding actions. Note that the below calls 13242 * to dtrace_dif_emulate() may set faults in machine state. This is 13243 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13244 * the stored DIF offset with its own (which is the desired behavior). 13245 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13246 * from machine state; this is okay, too. 13247 */ 13248 for (; helper != NULL; helper = helper->dtha_next) { 13249 if ((pred = helper->dtha_predicate) != NULL) { 13250 if (trace) 13251 dtrace_helper_trace(helper, mstate, vstate, 0); 13252 13253 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13254 goto next; 13255 13256 if (*flags & CPU_DTRACE_FAULT) 13257 goto err; 13258 } 13259 13260 for (i = 0; i < helper->dtha_nactions; i++) { 13261 if (trace) 13262 dtrace_helper_trace(helper, 13263 mstate, vstate, i + 1); 13264 13265 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13266 mstate, vstate, state); 13267 13268 if (*flags & CPU_DTRACE_FAULT) 13269 goto err; 13270 } 13271 13272 next: 13273 if (trace) 13274 dtrace_helper_trace(helper, mstate, vstate, 13275 DTRACE_HELPTRACE_NEXT); 13276 } 13277 13278 if (trace) 13279 dtrace_helper_trace(helper, mstate, vstate, 13280 DTRACE_HELPTRACE_DONE); 13281 13282 /* 13283 * Restore the arg0 that we saved upon entry. 
13284 */
13285 mstate->dtms_arg[0] = sarg0;
13286 mstate->dtms_arg[1] = sarg1;
13287
13288 return (rval);
13289
13290 err:
13291 if (trace)
13292 dtrace_helper_trace(helper, mstate, vstate,
13293 DTRACE_HELPTRACE_ERR);
13294
13295 /*
13296 * Restore the arg0 that we saved upon entry.
13297 */
13298 mstate->dtms_arg[0] = sarg0;
13299 mstate->dtms_arg[1] = sarg1;
13300
13301 return (0);
13302 }
13303
13304 static void
13305 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13306 dtrace_vstate_t *vstate)
13307 {
13308 int i;
13309
13310 if (helper->dtha_predicate != NULL)
13311 dtrace_difo_release(helper->dtha_predicate, vstate);
13312
13313 for (i = 0; i < helper->dtha_nactions; i++) {
13314 ASSERT(helper->dtha_actions[i] != NULL);
13315 dtrace_difo_release(helper->dtha_actions[i], vstate);
13316 }
13317
13318 kmem_free(helper->dtha_actions,
13319 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13320 kmem_free(helper, sizeof (dtrace_helper_action_t));
13321 }
13322
13323 static int
13324 dtrace_helper_destroygen(int gen)
13325 {
13326 proc_t *p = curproc;
13327 dtrace_helpers_t *help = p->p_dtrace_helpers;
13328 dtrace_vstate_t *vstate;
13329 int i;
13330
13331 ASSERT(MUTEX_HELD(&dtrace_lock));
13332
13333 if (help == NULL || gen > help->dthps_generation)
13334 return (EINVAL);
13335
13336 vstate = &help->dthps_vstate;
13337
13338 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13339 dtrace_helper_action_t *last = NULL, *h, *next;
13340
13341 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13342 next = h->dtha_next;
13343
13344 if (h->dtha_generation == gen) {
13345 if (last != NULL) {
13346 last->dtha_next = next;
13347 } else {
13348 help->dthps_actions[i] = next;
13349 }
13350
13351 dtrace_helper_action_destroy(h, vstate);
13352 } else {
13353 last = h;
13354 }
13355 }
13356 }
13357
13358 /*
13359 * Iterate until we've cleared out all helper providers with the
13360 * given generation number.
13361 */
13362 for (;;) {
13363 dtrace_helper_provider_t *prov;
13364
13365 /*
13366 * Look for a helper provider with the right generation. We
13367 * have to start back at the beginning of the list each time
13368 * because we drop dtrace_lock. It's unlikely that we'll make
13369 * more than two passes.
13370 */
13371 for (i = 0; i < help->dthps_nprovs; i++) {
13372 prov = help->dthps_provs[i];
13373
13374 if (prov->dthp_generation == gen)
13375 break;
13376 }
13377
13378 /*
13379 * If there were no matches, we're done.
13380 */
13381 if (i == help->dthps_nprovs)
13382 break;
13383
13384 /*
13385 * Move the last helper provider into this slot.
13386 */
13387 help->dthps_nprovs--;
13388 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
13389 help->dthps_provs[help->dthps_nprovs] = NULL;
13390
13391 mutex_exit(&dtrace_lock);
13392
13393 /*
13394 * If we have a meta provider, remove this helper provider.
13395 */
13396 mutex_enter(&dtrace_meta_lock);
13397 if (dtrace_meta_pid != NULL) {
13398 ASSERT(dtrace_deferred_pid == NULL);
13399 dtrace_helper_provider_remove(&prov->dthp_prov,
13400 p->p_pid);
13401 }
13402 mutex_exit(&dtrace_meta_lock);
13403
13404 dtrace_helper_provider_destroy(prov);
13405
13406 mutex_enter(&dtrace_lock);
13407 }
13408
13409 return (0);
13410 }
13411
13412 static int
13413 dtrace_helper_validate(dtrace_helper_action_t *helper)
13414 {
13415 int err = 0, i;
13416 dtrace_difo_t *dp;
13417
13418 if ((dp = helper->dtha_predicate) != NULL)
13419 err += dtrace_difo_validate_helper(dp);
13420
13421 for (i = 0; i < helper->dtha_nactions; i++)
13422 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
13423
13424 return (err == 0);
13425 }
13426
13427 static int
13428 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
13429 {
13430 dtrace_helpers_t *help;
13431 dtrace_helper_action_t *helper, *last;
13432 dtrace_actdesc_t *act;
13433 dtrace_vstate_t *vstate;
13434 dtrace_predicate_t *pred;
13435 int count = 0, nactions = 0, i;
13436
13437 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
13438 return (EINVAL);
13439
13440 help = curproc->p_dtrace_helpers;
13441 last = help->dthps_actions[which];
13442 vstate = &help->dthps_vstate;
13443
13444 for (count = 0; last != NULL; last = last->dtha_next) {
13445 count++;
13446 if (last->dtha_next == NULL)
13447 break;
13448 }
13449
13450 /*
13451 * If we already have dtrace_helper_actions_max helper actions for this
13452 * helper action type, we'll refuse to add a new one.
13453 */
13454 if (count >= dtrace_helper_actions_max)
13455 return (ENOSPC);
13456
13457 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
13458 helper->dtha_generation = help->dthps_generation;
13459
13460 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
13461 ASSERT(pred->dtp_difo != NULL);
13462 dtrace_difo_hold(pred->dtp_difo);
13463 helper->dtha_predicate = pred->dtp_difo;
13464 }
13465
13466 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
13467 if (act->dtad_kind != DTRACEACT_DIFEXPR)
13468 goto err;
13469
13470 if (act->dtad_difo == NULL)
13471 goto err;
13472
13473 nactions++;
13474 }
13475
13476 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
13477 (helper->dtha_nactions = nactions), KM_SLEEP);
13478
13479 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
13480 dtrace_difo_hold(act->dtad_difo);
13481 helper->dtha_actions[i++] = act->dtad_difo;
13482 }
13483
13484 if (!dtrace_helper_validate(helper))
13485 goto err;
13486
13487 if (last == NULL) {
13488 help->dthps_actions[which] = helper;
13489 } else {
13490 last->dtha_next = helper;
13491 }
13492
13493 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
13494 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
13495 dtrace_helptrace_next = 0;
13496 }
13497
13498 return (0);
13499 err:
13500 dtrace_helper_action_destroy(helper, vstate);
13501 return (EINVAL);
13502 }
13503
13504 static void
13505 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
13506 dof_helper_t *dofhp)
13507 {
13508 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
13509
13510 mutex_enter(&dtrace_meta_lock);
13511 mutex_enter(&dtrace_lock);
13512
13513 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
13514 /*
13515 * If the dtrace module is loaded but not attached, or if
13516 * there isn't a meta provider registered to deal with
13517 * these provider descriptions, we need to postpone creating
13518 * the actual providers until
later.
13519 */
13520
13521 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
13522 dtrace_deferred_pid != help) {
13523 help->dthps_deferred = 1;
13524 help->dthps_pid = p->p_pid;
13525 help->dthps_next = dtrace_deferred_pid;
13526 help->dthps_prev = NULL;
13527 if (dtrace_deferred_pid != NULL)
13528 dtrace_deferred_pid->dthps_prev = help;
13529 dtrace_deferred_pid = help;
13530 }
13531
13532 mutex_exit(&dtrace_lock);
13533
13534 } else if (dofhp != NULL) {
13535 /*
13536 * If the dtrace module is loaded and we have a particular
13537 * helper provider description, pass that off to the
13538 * meta provider.
13539 */
13540
13541 mutex_exit(&dtrace_lock);
13542
13543 dtrace_helper_provide(dofhp, p->p_pid);
13544
13545 } else {
13546 /*
13547 * Otherwise, just pass all the helper provider descriptions
13548 * off to the meta provider.
13549 */
13550
13551 int i;
13552 mutex_exit(&dtrace_lock);
13553
13554 for (i = 0; i < help->dthps_nprovs; i++) {
13555 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
13556 p->p_pid);
13557 }
13558 }
13559
13560 mutex_exit(&dtrace_meta_lock);
13561 }
13562
13563 static int
13564 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
13565 {
13566 dtrace_helpers_t *help;
13567 dtrace_helper_provider_t *hprov, **tmp_provs;
13568 uint_t tmp_maxprovs, i;
13569
13570 ASSERT(MUTEX_HELD(&dtrace_lock));
13571
13572 help = curproc->p_dtrace_helpers;
13573 ASSERT(help != NULL);
13574
13575 /*
13576 * If we already have dtrace_helper_providers_max helper providers,
13577 * we refuse to add a new one.
13578 */
13579 if (help->dthps_nprovs >= dtrace_helper_providers_max)
13580 return (ENOSPC);
13581
13582 /*
13583 * Check to make sure this isn't a duplicate.
13584 */
13585 for (i = 0; i < help->dthps_nprovs; i++) {
13586 if (dofhp->dofhp_addr ==
13587 help->dthps_provs[i]->dthp_prov.dofhp_addr)
13588 return (EALREADY);
13589 }
13590
13591 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
13592 hprov->dthp_prov = *dofhp;
13593 hprov->dthp_ref = 1;
13594 hprov->dthp_generation = gen;
13595
13596 /*
13597 * Allocate a bigger table for helper providers if it's already full.
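 *
 * The table grows geometrically: 0 slots becomes 2, and thereafter the
 * size doubles (2, 4, 8, ...) up to the dtrace_helper_providers_max
 * cap, so a process adding providers one at a time sees at most
 * O(log n) reallocations.  The old table, if any, is copied into the
 * new one and then freed.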
13598 */ 13599 if (help->dthps_maxprovs == help->dthps_nprovs) { 13600 tmp_maxprovs = help->dthps_maxprovs; 13601 tmp_provs = help->dthps_provs; 13602 13603 if (help->dthps_maxprovs == 0) 13604 help->dthps_maxprovs = 2; 13605 else 13606 help->dthps_maxprovs *= 2; 13607 if (help->dthps_maxprovs > dtrace_helper_providers_max) 13608 help->dthps_maxprovs = dtrace_helper_providers_max; 13609 13610 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 13611 13612 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 13613 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13614 13615 if (tmp_provs != NULL) { 13616 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 13617 sizeof (dtrace_helper_provider_t *)); 13618 kmem_free(tmp_provs, tmp_maxprovs * 13619 sizeof (dtrace_helper_provider_t *)); 13620 } 13621 } 13622 13623 help->dthps_provs[help->dthps_nprovs] = hprov; 13624 help->dthps_nprovs++; 13625 13626 return (0); 13627 } 13628 13629 static void 13630 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 13631 { 13632 mutex_enter(&dtrace_lock); 13633 13634 if (--hprov->dthp_ref == 0) { 13635 dof_hdr_t *dof; 13636 mutex_exit(&dtrace_lock); 13637 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 13638 dtrace_dof_destroy(dof); 13639 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 13640 } else { 13641 mutex_exit(&dtrace_lock); 13642 } 13643 } 13644 13645 static int 13646 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 13647 { 13648 uintptr_t daddr = (uintptr_t)dof; 13649 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 13650 dof_provider_t *provider; 13651 dof_probe_t *probe; 13652 uint8_t *arg; 13653 char *strtab, *typestr; 13654 dof_stridx_t typeidx; 13655 size_t typesz; 13656 uint_t nprobes, j, k; 13657 13658 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 13659 13660 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 13661 dtrace_dof_error(dof, "misaligned section offset"); 13662 return (-1); 13663 } 13664 13665 /* 13666 * The section needs to be large enough to contain the DOF provider 13667 * structure appropriate for the given version. 13668 */ 13669 if (sec->dofs_size < 13670 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
13671 offsetof(dof_provider_t, dofpv_prenoffs) : 13672 sizeof (dof_provider_t))) { 13673 dtrace_dof_error(dof, "provider section too small"); 13674 return (-1); 13675 } 13676 13677 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 13678 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 13679 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 13680 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 13681 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 13682 13683 if (str_sec == NULL || prb_sec == NULL || 13684 arg_sec == NULL || off_sec == NULL) 13685 return (-1); 13686 13687 enoff_sec = NULL; 13688 13689 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13690 provider->dofpv_prenoffs != DOF_SECT_NONE && 13691 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 13692 provider->dofpv_prenoffs)) == NULL) 13693 return (-1); 13694 13695 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 13696 13697 if (provider->dofpv_name >= str_sec->dofs_size || 13698 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 13699 dtrace_dof_error(dof, "invalid provider name"); 13700 return (-1); 13701 } 13702 13703 if (prb_sec->dofs_entsize == 0 || 13704 prb_sec->dofs_entsize > prb_sec->dofs_size) { 13705 dtrace_dof_error(dof, "invalid entry size"); 13706 return (-1); 13707 } 13708 13709 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 13710 dtrace_dof_error(dof, "misaligned entry size"); 13711 return (-1); 13712 } 13713 13714 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 13715 dtrace_dof_error(dof, "invalid entry size"); 13716 return (-1); 13717 } 13718 13719 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 13720 dtrace_dof_error(dof, "misaligned section offset"); 13721 return (-1); 13722 } 13723 13724 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 13725 dtrace_dof_error(dof, "invalid entry size"); 13726 return (-1); 13727 } 13728 13729 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 13730 13731 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 13732 13733 /* 13734 * Take a pass through the probes to check for errors. 13735 */ 13736 for (j = 0; j < nprobes; j++) { 13737 probe = (dof_probe_t *)(uintptr_t)(daddr + 13738 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 13739 13740 if (probe->dofpr_func >= str_sec->dofs_size) { 13741 dtrace_dof_error(dof, "invalid function name"); 13742 return (-1); 13743 } 13744 13745 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 13746 dtrace_dof_error(dof, "function name too long"); 13747 return (-1); 13748 } 13749 13750 if (probe->dofpr_name >= str_sec->dofs_size || 13751 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 13752 dtrace_dof_error(dof, "invalid probe name"); 13753 return (-1); 13754 } 13755 13756 /* 13757 * The offset count must not wrap the index, and the offsets 13758 * must also not overflow the section's data. 13759 */ 13760 if (probe->dofpr_offidx + probe->dofpr_noffs < 13761 probe->dofpr_offidx || 13762 (probe->dofpr_offidx + probe->dofpr_noffs) * 13763 off_sec->dofs_entsize > off_sec->dofs_size) { 13764 dtrace_dof_error(dof, "invalid probe offset"); 13765 return (-1); 13766 } 13767 13768 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 13769 /* 13770 * If there's no is-enabled offset section, make sure 13771 * there aren't any is-enabled offsets. Otherwise 13772 * perform the same checks as for probe offsets 13773 * (immediately above). 
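 *
 * Both checks guard against a hostile DOF in the same way: the
 * "idx + n < idx" test catches 32-bit wrap-around (e.g. an offidx of
 * 0xfffffffe with a count of 4 wraps to 2, which would otherwise look
 * small enough), and the multiplication against the entry size catches
 * ranges that extend past the end of the section's data.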
13774 */ 13775 if (enoff_sec == NULL) { 13776 if (probe->dofpr_enoffidx != 0 || 13777 probe->dofpr_nenoffs != 0) { 13778 dtrace_dof_error(dof, "is-enabled " 13779 "offsets with null section"); 13780 return (-1); 13781 } 13782 } else if (probe->dofpr_enoffidx + 13783 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 13784 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 13785 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 13786 dtrace_dof_error(dof, "invalid is-enabled " 13787 "offset"); 13788 return (-1); 13789 } 13790 13791 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 13792 dtrace_dof_error(dof, "zero probe and " 13793 "is-enabled offsets"); 13794 return (-1); 13795 } 13796 } else if (probe->dofpr_noffs == 0) { 13797 dtrace_dof_error(dof, "zero probe offsets"); 13798 return (-1); 13799 } 13800 13801 if (probe->dofpr_argidx + probe->dofpr_xargc < 13802 probe->dofpr_argidx || 13803 (probe->dofpr_argidx + probe->dofpr_xargc) * 13804 arg_sec->dofs_entsize > arg_sec->dofs_size) { 13805 dtrace_dof_error(dof, "invalid args"); 13806 return (-1); 13807 } 13808 13809 typeidx = probe->dofpr_nargv; 13810 typestr = strtab + probe->dofpr_nargv; 13811 for (k = 0; k < probe->dofpr_nargc; k++) { 13812 if (typeidx >= str_sec->dofs_size) { 13813 dtrace_dof_error(dof, "bad " 13814 "native argument type"); 13815 return (-1); 13816 } 13817 13818 typesz = strlen(typestr) + 1; 13819 if (typesz > DTRACE_ARGTYPELEN) { 13820 dtrace_dof_error(dof, "native " 13821 "argument type too long"); 13822 return (-1); 13823 } 13824 typeidx += typesz; 13825 typestr += typesz; 13826 } 13827 13828 typeidx = probe->dofpr_xargv; 13829 typestr = strtab + probe->dofpr_xargv; 13830 for (k = 0; k < probe->dofpr_xargc; k++) { 13831 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 13832 dtrace_dof_error(dof, "bad " 13833 "native argument index"); 13834 return (-1); 13835 } 13836 13837 if (typeidx >= str_sec->dofs_size) { 13838 dtrace_dof_error(dof, "bad " 13839 "translated argument type"); 13840 return (-1); 13841 } 13842 13843 typesz = strlen(typestr) + 1; 13844 if (typesz > DTRACE_ARGTYPELEN) { 13845 dtrace_dof_error(dof, "translated argument " 13846 "type too long"); 13847 return (-1); 13848 } 13849 13850 typeidx += typesz; 13851 typestr += typesz; 13852 } 13853 } 13854 13855 return (0); 13856 } 13857 13858 static int 13859 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 13860 { 13861 dtrace_helpers_t *help; 13862 dtrace_vstate_t *vstate; 13863 dtrace_enabling_t *enab = NULL; 13864 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 13865 uintptr_t daddr = (uintptr_t)dof; 13866 13867 ASSERT(MUTEX_HELD(&dtrace_lock)); 13868 13869 if ((help = curproc->p_dtrace_helpers) == NULL) 13870 help = dtrace_helpers_create(curproc); 13871 13872 vstate = &help->dthps_vstate; 13873 13874 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 13875 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 13876 dtrace_dof_destroy(dof); 13877 return (rv); 13878 } 13879 13880 /* 13881 * Look for helper providers and validate their descriptions. 
13882 */ 13883 if (dhp != NULL) { 13884 for (i = 0; i < dof->dofh_secnum; i++) { 13885 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 13886 dof->dofh_secoff + i * dof->dofh_secsize); 13887 13888 if (sec->dofs_type != DOF_SECT_PROVIDER) 13889 continue; 13890 13891 if (dtrace_helper_provider_validate(dof, sec) != 0) { 13892 dtrace_enabling_destroy(enab); 13893 dtrace_dof_destroy(dof); 13894 return (-1); 13895 } 13896 13897 nprovs++; 13898 } 13899 } 13900 13901 /* 13902 * Now we need to walk through the ECB descriptions in the enabling. 13903 */ 13904 for (i = 0; i < enab->dten_ndesc; i++) { 13905 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 13906 dtrace_probedesc_t *desc = &ep->dted_probe; 13907 13908 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 13909 continue; 13910 13911 if (strcmp(desc->dtpd_mod, "helper") != 0) 13912 continue; 13913 13914 if (strcmp(desc->dtpd_func, "ustack") != 0) 13915 continue; 13916 13917 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 13918 ep)) != 0) { 13919 /* 13920 * Adding this helper action failed -- we are now going 13921 * to rip out the entire generation and return failure. 13922 */ 13923 (void) dtrace_helper_destroygen(help->dthps_generation); 13924 dtrace_enabling_destroy(enab); 13925 dtrace_dof_destroy(dof); 13926 return (-1); 13927 } 13928 13929 nhelpers++; 13930 } 13931 13932 if (nhelpers < enab->dten_ndesc) 13933 dtrace_dof_error(dof, "unmatched helpers"); 13934 13935 gen = help->dthps_generation++; 13936 dtrace_enabling_destroy(enab); 13937 13938 if (dhp != NULL && nprovs > 0) { 13939 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 13940 if (dtrace_helper_provider_add(dhp, gen) == 0) { 13941 mutex_exit(&dtrace_lock); 13942 dtrace_helper_provider_register(curproc, help, dhp); 13943 mutex_enter(&dtrace_lock); 13944 13945 destroy = 0; 13946 } 13947 } 13948 13949 if (destroy) 13950 dtrace_dof_destroy(dof); 13951 13952 return (gen); 13953 } 13954 13955 static dtrace_helpers_t * 13956 dtrace_helpers_create(proc_t *p) 13957 { 13958 dtrace_helpers_t *help; 13959 13960 ASSERT(MUTEX_HELD(&dtrace_lock)); 13961 ASSERT(p->p_dtrace_helpers == NULL); 13962 13963 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 13964 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 13965 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 13966 13967 p->p_dtrace_helpers = help; 13968 dtrace_helpers++; 13969 13970 return (help); 13971 } 13972 13973 static void 13974 dtrace_helpers_destroy(void) 13975 { 13976 dtrace_helpers_t *help; 13977 dtrace_vstate_t *vstate; 13978 proc_t *p = curproc; 13979 int i; 13980 13981 mutex_enter(&dtrace_lock); 13982 13983 ASSERT(p->p_dtrace_helpers != NULL); 13984 ASSERT(dtrace_helpers > 0); 13985 13986 help = p->p_dtrace_helpers; 13987 vstate = &help->dthps_vstate; 13988 13989 /* 13990 * We're now going to lose the help from this process. 13991 */ 13992 p->p_dtrace_helpers = NULL; 13993 dtrace_sync(); 13994 13995 /* 13996 * Destory the helper actions. 13997 */ 13998 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13999 dtrace_helper_action_t *h, *next; 14000 14001 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14002 next = h->dtha_next; 14003 dtrace_helper_action_destroy(h, vstate); 14004 h = next; 14005 } 14006 } 14007 14008 mutex_exit(&dtrace_lock); 14009 14010 /* 14011 * Destroy the helper providers. 
14012 */ 14013 if (help->dthps_maxprovs > 0) { 14014 mutex_enter(&dtrace_meta_lock); 14015 if (dtrace_meta_pid != NULL) { 14016 ASSERT(dtrace_deferred_pid == NULL); 14017 14018 for (i = 0; i < help->dthps_nprovs; i++) { 14019 dtrace_helper_provider_remove( 14020 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14021 } 14022 } else { 14023 mutex_enter(&dtrace_lock); 14024 ASSERT(help->dthps_deferred == 0 || 14025 help->dthps_next != NULL || 14026 help->dthps_prev != NULL || 14027 help == dtrace_deferred_pid); 14028 14029 /* 14030 * Remove the helper from the deferred list. 14031 */ 14032 if (help->dthps_next != NULL) 14033 help->dthps_next->dthps_prev = help->dthps_prev; 14034 if (help->dthps_prev != NULL) 14035 help->dthps_prev->dthps_next = help->dthps_next; 14036 if (dtrace_deferred_pid == help) { 14037 dtrace_deferred_pid = help->dthps_next; 14038 ASSERT(help->dthps_prev == NULL); 14039 } 14040 14041 mutex_exit(&dtrace_lock); 14042 } 14043 14044 mutex_exit(&dtrace_meta_lock); 14045 14046 for (i = 0; i < help->dthps_nprovs; i++) { 14047 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14048 } 14049 14050 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14051 sizeof (dtrace_helper_provider_t *)); 14052 } 14053 14054 mutex_enter(&dtrace_lock); 14055 14056 dtrace_vstate_fini(&help->dthps_vstate); 14057 kmem_free(help->dthps_actions, 14058 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14059 kmem_free(help, sizeof (dtrace_helpers_t)); 14060 14061 --dtrace_helpers; 14062 mutex_exit(&dtrace_lock); 14063 } 14064 14065 static void 14066 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14067 { 14068 dtrace_helpers_t *help, *newhelp; 14069 dtrace_helper_action_t *helper, *new, *last; 14070 dtrace_difo_t *dp; 14071 dtrace_vstate_t *vstate; 14072 int i, j, sz, hasprovs = 0; 14073 14074 mutex_enter(&dtrace_lock); 14075 ASSERT(from->p_dtrace_helpers != NULL); 14076 ASSERT(dtrace_helpers > 0); 14077 14078 help = from->p_dtrace_helpers; 14079 newhelp = dtrace_helpers_create(to); 14080 ASSERT(to->p_dtrace_helpers != NULL); 14081 14082 newhelp->dthps_generation = help->dthps_generation; 14083 vstate = &newhelp->dthps_vstate; 14084 14085 /* 14086 * Duplicate the helper actions. 14087 */ 14088 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14089 if ((helper = help->dthps_actions[i]) == NULL) 14090 continue; 14091 14092 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14093 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14094 KM_SLEEP); 14095 new->dtha_generation = helper->dtha_generation; 14096 14097 if ((dp = helper->dtha_predicate) != NULL) { 14098 dp = dtrace_difo_duplicate(dp, vstate); 14099 new->dtha_predicate = dp; 14100 } 14101 14102 new->dtha_nactions = helper->dtha_nactions; 14103 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14104 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14105 14106 for (j = 0; j < new->dtha_nactions; j++) { 14107 dtrace_difo_t *dp = helper->dtha_actions[j]; 14108 14109 ASSERT(dp != NULL); 14110 dp = dtrace_difo_duplicate(dp, vstate); 14111 new->dtha_actions[j] = dp; 14112 } 14113 14114 if (last != NULL) { 14115 last->dtha_next = new; 14116 } else { 14117 newhelp->dthps_actions[i] = new; 14118 } 14119 14120 last = new; 14121 } 14122 } 14123 14124 /* 14125 * Duplicate the helper providers and register them with the 14126 * DTrace framework. 
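 *
 * Note that the providers themselves are shared rather than
 * deep-copied: each entry's reference count is bumped, so the
 * underlying DOF is freed only when the last process holding it
 * (typically the parent and any children created by fork(2)) destroys
 * its helpers.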
14127 */ 14128 if (help->dthps_nprovs > 0) { 14129 newhelp->dthps_nprovs = help->dthps_nprovs; 14130 newhelp->dthps_maxprovs = help->dthps_nprovs; 14131 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14132 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14133 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14134 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14135 newhelp->dthps_provs[i]->dthp_ref++; 14136 } 14137 14138 hasprovs = 1; 14139 } 14140 14141 mutex_exit(&dtrace_lock); 14142 14143 if (hasprovs) 14144 dtrace_helper_provider_register(to, newhelp, NULL); 14145 } 14146 14147 /* 14148 * DTrace Hook Functions 14149 */ 14150 static void 14151 dtrace_module_loaded(struct modctl *ctl) 14152 { 14153 dtrace_provider_t *prv; 14154 14155 mutex_enter(&dtrace_provider_lock); 14156 mutex_enter(&mod_lock); 14157 14158 ASSERT(ctl->mod_busy); 14159 14160 /* 14161 * We're going to call each provider's per-module provide operation 14162 * specifying only this module. 14163 */ 14164 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14165 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14166 14167 mutex_exit(&mod_lock); 14168 mutex_exit(&dtrace_provider_lock); 14169 14170 /* 14171 * If we have any retained enablings, we need to match against them. 14172 * Enabling probes requires that cpu_lock be held, and we cannot hold 14173 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14174 * module. (In particular, this happens when loading scheduling 14175 * classes.) So if we have any retained enablings, we need to dispatch 14176 * our task queue to do the match for us. 14177 */ 14178 mutex_enter(&dtrace_lock); 14179 14180 if (dtrace_retained == NULL) { 14181 mutex_exit(&dtrace_lock); 14182 return; 14183 } 14184 14185 (void) taskq_dispatch(dtrace_taskq, 14186 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 14187 14188 mutex_exit(&dtrace_lock); 14189 14190 /* 14191 * And now, for a little heuristic sleaze: in general, we want to 14192 * match modules as soon as they load. However, we cannot guarantee 14193 * this, because it would lead us to the lock ordering violation 14194 * outlined above. The common case, of course, is that cpu_lock is 14195 * _not_ held -- so we delay here for a clock tick, hoping that that's 14196 * long enough for the task queue to do its work. If it's not, it's 14197 * not a serious problem -- it just means that the module that we 14198 * just loaded may not be immediately instrumentable. 14199 */ 14200 delay(1); 14201 } 14202 14203 static void 14204 dtrace_module_unloaded(struct modctl *ctl) 14205 { 14206 dtrace_probe_t template, *probe, *first, *next; 14207 dtrace_provider_t *prov; 14208 14209 template.dtpr_mod = ctl->mod_modname; 14210 14211 mutex_enter(&dtrace_provider_lock); 14212 mutex_enter(&mod_lock); 14213 mutex_enter(&dtrace_lock); 14214 14215 if (dtrace_bymod == NULL) { 14216 /* 14217 * The DTrace module is loaded (obviously) but not attached; 14218 * we don't have any work to do. 14219 */ 14220 mutex_exit(&dtrace_provider_lock); 14221 mutex_exit(&mod_lock); 14222 mutex_exit(&dtrace_lock); 14223 return; 14224 } 14225 14226 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14227 probe != NULL; probe = probe->dtpr_nextmod) { 14228 if (probe->dtpr_ecb != NULL) { 14229 mutex_exit(&dtrace_provider_lock); 14230 mutex_exit(&mod_lock); 14231 mutex_exit(&dtrace_lock); 14232 14233 /* 14234 * This shouldn't _actually_ be possible -- we're 14235 * unloading a module that has an enabled probe in it.
14236 * (It's normally up to the provider to make sure that 14237 * this can't happen.) However, because dtps_enable() 14238 * doesn't have a failure mode, there can be an 14239 * enable/unload race. Upshot: we don't want to 14240 * assert, but we're not going to disable the 14241 * probe, either. 14242 */ 14243 if (dtrace_err_verbose) { 14244 cmn_err(CE_WARN, "unloaded module '%s' had " 14245 "enabled probes", ctl->mod_modname); 14246 } 14247 14248 return; 14249 } 14250 } 14251 14252 probe = first; 14253 14254 for (first = NULL; probe != NULL; probe = next) { 14255 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14256 14257 dtrace_probes[probe->dtpr_id - 1] = NULL; 14258 14259 next = probe->dtpr_nextmod; 14260 dtrace_hash_remove(dtrace_bymod, probe); 14261 dtrace_hash_remove(dtrace_byfunc, probe); 14262 dtrace_hash_remove(dtrace_byname, probe); 14263 14264 if (first == NULL) { 14265 first = probe; 14266 probe->dtpr_nextmod = NULL; 14267 } else { 14268 probe->dtpr_nextmod = first; 14269 first = probe; 14270 } 14271 } 14272 14273 /* 14274 * We've removed all of the module's probes from the hash chains and 14275 * from the probe array. Now issue a dtrace_sync() to be sure that 14276 * everyone has cleared out from any probe array processing. 14277 */ 14278 dtrace_sync(); 14279 14280 for (probe = first; probe != NULL; probe = first) { 14281 first = probe->dtpr_nextmod; 14282 prov = probe->dtpr_provider; 14283 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14284 probe->dtpr_arg); 14285 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14286 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14287 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14288 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14289 kmem_free(probe, sizeof (dtrace_probe_t)); 14290 } 14291 14292 mutex_exit(&dtrace_lock); 14293 mutex_exit(&mod_lock); 14294 mutex_exit(&dtrace_provider_lock); 14295 } 14296 14297 void 14298 dtrace_suspend(void) 14299 { 14300 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14301 } 14302 14303 void 14304 dtrace_resume(void) 14305 { 14306 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14307 } 14308 14309 static int 14310 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14311 { 14312 ASSERT(MUTEX_HELD(&cpu_lock)); 14313 mutex_enter(&dtrace_lock); 14314 14315 switch (what) { 14316 case CPU_CONFIG: { 14317 dtrace_state_t *state; 14318 dtrace_optval_t *opt, rs, c; 14319 14320 /* 14321 * For now, we only allocate a new buffer for anonymous state. 14322 */ 14323 if ((state = dtrace_anon.dta_state) == NULL) 14324 break; 14325 14326 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14327 break; 14328 14329 opt = state->dts_options; 14330 c = opt[DTRACEOPT_CPU]; 14331 14332 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14333 break; 14334 14335 /* 14336 * Regardless of what the actual policy is, we're going to 14337 * temporarily set our resize policy to be manual. We're 14338 * also going to temporarily set our CPU option to denote 14339 * the newly configured CPU. 14340 */ 14341 rs = opt[DTRACEOPT_BUFRESIZE]; 14342 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14343 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14344 14345 (void) dtrace_state_buffers(state); 14346 14347 opt[DTRACEOPT_BUFRESIZE] = rs; 14348 opt[DTRACEOPT_CPU] = c; 14349 14350 break; 14351 } 14352 14353 case CPU_UNCONFIG: 14354 /* 14355 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 14356 * buffer will be freed when the consumer exits.) 14357 */ 14358 break; 14359 14360 default: 14361 break; 14362 } 14363 14364 mutex_exit(&dtrace_lock); 14365 return (0); 14366 } 14367 14368 static void 14369 dtrace_cpu_setup_initial(processorid_t cpu) 14370 { 14371 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 14372 } 14373 14374 static void 14375 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 14376 { 14377 if (dtrace_toxranges >= dtrace_toxranges_max) { 14378 int osize, nsize; 14379 dtrace_toxrange_t *range; 14380 14381 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14382 14383 if (osize == 0) { 14384 ASSERT(dtrace_toxrange == NULL); 14385 ASSERT(dtrace_toxranges_max == 0); 14386 dtrace_toxranges_max = 1; 14387 } else { 14388 dtrace_toxranges_max <<= 1; 14389 } 14390 14391 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14392 range = kmem_zalloc(nsize, KM_SLEEP); 14393 14394 if (dtrace_toxrange != NULL) { 14395 ASSERT(osize != 0); 14396 bcopy(dtrace_toxrange, range, osize); 14397 kmem_free(dtrace_toxrange, osize); 14398 } 14399 14400 dtrace_toxrange = range; 14401 } 14402 14403 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 14404 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 14405 14406 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 14407 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 14408 dtrace_toxranges++; 14409 } 14410 14411 /* 14412 * DTrace Driver Cookbook Functions 14413 */ 14414 /*ARGSUSED*/ 14415 static int 14416 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 14417 { 14418 dtrace_provider_id_t id; 14419 dtrace_state_t *state = NULL; 14420 dtrace_enabling_t *enab; 14421 14422 mutex_enter(&cpu_lock); 14423 mutex_enter(&dtrace_provider_lock); 14424 mutex_enter(&dtrace_lock); 14425 14426 if (ddi_soft_state_init(&dtrace_softstate, 14427 sizeof (dtrace_state_t), 0) != 0) { 14428 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 14429 mutex_exit(&cpu_lock); 14430 mutex_exit(&dtrace_provider_lock); 14431 mutex_exit(&dtrace_lock); 14432 return (DDI_FAILURE); 14433 } 14434 14435 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 14436 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 14437 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 14438 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 14439 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 14440 ddi_remove_minor_node(devi, NULL); 14441 ddi_soft_state_fini(&dtrace_softstate); 14442 mutex_exit(&cpu_lock); 14443 mutex_exit(&dtrace_provider_lock); 14444 mutex_exit(&dtrace_lock); 14445 return (DDI_FAILURE); 14446 } 14447 14448 ddi_report_dev(devi); 14449 dtrace_devi = devi; 14450 14451 dtrace_modload = dtrace_module_loaded; 14452 dtrace_modunload = dtrace_module_unloaded; 14453 dtrace_cpu_init = dtrace_cpu_setup_initial; 14454 dtrace_helpers_cleanup = dtrace_helpers_destroy; 14455 dtrace_helpers_fork = dtrace_helpers_duplicate; 14456 dtrace_cpustart_init = dtrace_suspend; 14457 dtrace_cpustart_fini = dtrace_resume; 14458 dtrace_debugger_init = dtrace_suspend; 14459 dtrace_debugger_fini = dtrace_resume; 14460 14461 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 14462 14463 ASSERT(MUTEX_HELD(&cpu_lock)); 14464 14465 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 14466 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14467 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 14468 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 14469 VM_SLEEP | 
VMC_IDENTIFIER); 14470 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 14471 1, INT_MAX, 0); 14472 14473 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 14474 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 14475 NULL, NULL, NULL, NULL, NULL, 0); 14476 14477 ASSERT(MUTEX_HELD(&cpu_lock)); 14478 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 14479 offsetof(dtrace_probe_t, dtpr_nextmod), 14480 offsetof(dtrace_probe_t, dtpr_prevmod)); 14481 14482 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 14483 offsetof(dtrace_probe_t, dtpr_nextfunc), 14484 offsetof(dtrace_probe_t, dtpr_prevfunc)); 14485 14486 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 14487 offsetof(dtrace_probe_t, dtpr_nextname), 14488 offsetof(dtrace_probe_t, dtpr_prevname)); 14489 14490 if (dtrace_retain_max < 1) { 14491 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 14492 "setting to 1", dtrace_retain_max); 14493 dtrace_retain_max = 1; 14494 } 14495 14496 /* 14497 * Now discover our toxic ranges. 14498 */ 14499 dtrace_toxic_ranges(dtrace_toxrange_add); 14500 14501 /* 14502 * Before we register ourselves as a provider to our own framework, 14503 * we would like to assert that dtrace_provider is NULL -- but that's 14504 * not true if we were loaded as a dependency of a DTrace provider. 14505 * Once we've registered, we can assert that dtrace_provider is our 14506 * pseudo provider. 14507 */ 14508 (void) dtrace_register("dtrace", &dtrace_provider_attr, 14509 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 14510 14511 ASSERT(dtrace_provider != NULL); 14512 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 14513 14514 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 14515 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 14516 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 14517 dtrace_provider, NULL, NULL, "END", 0, NULL); 14518 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 14519 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 14520 14521 dtrace_anon_property(); 14522 mutex_exit(&cpu_lock); 14523 14524 /* 14525 * If DTrace helper tracing is enabled, we need to allocate the 14526 * trace buffer and initialize the values. 14527 */ 14528 if (dtrace_helptrace_enabled) { 14529 ASSERT(dtrace_helptrace_buffer == NULL); 14530 dtrace_helptrace_buffer = 14531 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 14532 dtrace_helptrace_next = 0; 14533 } 14534 14535 /* 14536 * If there are already providers, we must ask them to provide their 14537 * probes, and then match any anonymous enabling against them. Note 14538 * that there should be no other retained enablings at this time: 14539 * the only retained enablings at this time should be the anonymous 14540 * enabling. 14541 */ 14542 if (dtrace_anon.dta_enabling != NULL) { 14543 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 14544 14545 dtrace_enabling_provide(NULL); 14546 state = dtrace_anon.dta_state; 14547 14548 /* 14549 * We couldn't hold cpu_lock across the above call to 14550 * dtrace_enabling_provide(), but we must hold it to actually 14551 * enable the probes. We have to drop all of our locks, pick 14552 * up cpu_lock, and regain our locks before matching the 14553 * retained anonymous enabling. 
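* (That is, the locks are reacquired in the required order: cpu_lock
* first, then dtrace_provider_lock, then dtrace_lock.)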
14554 */ 14555 mutex_exit(&dtrace_lock); 14556 mutex_exit(&dtrace_provider_lock); 14557 14558 mutex_enter(&cpu_lock); 14559 mutex_enter(&dtrace_provider_lock); 14560 mutex_enter(&dtrace_lock); 14561 14562 if ((enab = dtrace_anon.dta_enabling) != NULL) 14563 (void) dtrace_enabling_match(enab, NULL); 14564 14565 mutex_exit(&cpu_lock); 14566 } 14567 14568 mutex_exit(&dtrace_lock); 14569 mutex_exit(&dtrace_provider_lock); 14570 14571 if (state != NULL) { 14572 /* 14573 * If we created any anonymous state, set it going now. 14574 */ 14575 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 14576 } 14577 14578 return (DDI_SUCCESS); 14579 } 14580 14581 /*ARGSUSED*/ 14582 static int 14583 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 14584 { 14585 dtrace_state_t *state; 14586 uint32_t priv; 14587 uid_t uid; 14588 zoneid_t zoneid; 14589 14590 if (getminor(*devp) == DTRACEMNRN_HELPER) 14591 return (0); 14592 14593 /* 14594 * If this wasn't an open with the "helper" minor, then it must be 14595 * the "dtrace" minor. 14596 */ 14597 if (getminor(*devp) != DTRACEMNRN_DTRACE) 14598 return (ENXIO); 14599 14600 /* 14601 * If no DTRACE_PRIV_* bits are set in the credential, then the 14602 * caller lacks sufficient permission to do anything with DTrace. 14603 */ 14604 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 14605 if (priv == DTRACE_PRIV_NONE) 14606 return (EACCES); 14607 14608 /* 14609 * Ask all providers to provide all their probes. 14610 */ 14611 mutex_enter(&dtrace_provider_lock); 14612 dtrace_probe_provide(NULL, NULL); 14613 mutex_exit(&dtrace_provider_lock); 14614 14615 mutex_enter(&cpu_lock); 14616 mutex_enter(&dtrace_lock); 14617 dtrace_opens++; 14618 dtrace_membar_producer(); 14619 14620 /* 14621 * If the kernel debugger is active (that is, if the kernel debugger 14622 * modified text in some way), we won't allow the open. 14623 */ 14624 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14625 dtrace_opens--; 14626 mutex_exit(&cpu_lock); 14627 mutex_exit(&dtrace_lock); 14628 return (EBUSY); 14629 } 14630 14631 state = dtrace_state_create(devp, cred_p); 14632 mutex_exit(&cpu_lock); 14633 14634 if (state == NULL) { 14635 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 14636 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14637 mutex_exit(&dtrace_lock); 14638 return (EAGAIN); 14639 } 14640 14641 mutex_exit(&dtrace_lock); 14642 14643 return (0); 14644 } 14645 14646 /*ARGSUSED*/ 14647 static int 14648 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 14649 { 14650 minor_t minor = getminor(dev); 14651 dtrace_state_t *state; 14652 14653 if (minor == DTRACEMNRN_HELPER) 14654 return (0); 14655 14656 state = ddi_get_soft_state(dtrace_softstate, minor); 14657 14658 mutex_enter(&cpu_lock); 14659 mutex_enter(&dtrace_lock); 14660 14661 if (state->dts_anon) { 14662 /* 14663 * There is anonymous state. Destroy that first. 14664 */ 14665 ASSERT(dtrace_anon.dta_state == NULL); 14666 dtrace_state_destroy(state->dts_anon); 14667 } 14668 14669 dtrace_state_destroy(state); 14670 ASSERT(dtrace_opens > 0); 14671 14672 /* 14673 * Only relinquish control of the kernel debugger interface when there 14674 * are no consumers and no anonymous enablings. 
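* (This is the counterpart to the KDI_DTSET_DTRACE_ACTIVATE performed
* in dtrace_open(): deactivation is deferred until the last consumer
* goes away and no anonymous enabling remains.)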
14675 */ 14676 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 14677 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14678 14679 mutex_exit(&dtrace_lock); 14680 mutex_exit(&cpu_lock); 14681 14682 return (0); 14683 } 14684 14685 /*ARGSUSED*/ 14686 static int 14687 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 14688 { 14689 int rval; 14690 dof_helper_t help, *dhp = NULL; 14691 14692 switch (cmd) { 14693 case DTRACEHIOC_ADDDOF: 14694 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 14695 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 14696 return (EFAULT); 14697 } 14698 14699 dhp = &help; 14700 arg = (intptr_t)help.dofhp_dof; 14701 /*FALLTHROUGH*/ 14702 14703 case DTRACEHIOC_ADD: { 14704 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 14705 14706 if (dof == NULL) 14707 return (rval); 14708 14709 mutex_enter(&dtrace_lock); 14710 14711 /* 14712 * dtrace_helper_slurp() takes responsibility for the dof -- 14713 * it may free it now or it may save it and free it later. 14714 */ 14715 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 14716 *rv = rval; 14717 rval = 0; 14718 } else { 14719 rval = EINVAL; 14720 } 14721 14722 mutex_exit(&dtrace_lock); 14723 return (rval); 14724 } 14725 14726 case DTRACEHIOC_REMOVE: { 14727 mutex_enter(&dtrace_lock); 14728 rval = dtrace_helper_destroygen(arg); 14729 mutex_exit(&dtrace_lock); 14730 14731 return (rval); 14732 } 14733 14734 default: 14735 break; 14736 } 14737 14738 return (ENOTTY); 14739 } 14740 14741 /*ARGSUSED*/ 14742 static int 14743 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 14744 { 14745 minor_t minor = getminor(dev); 14746 dtrace_state_t *state; 14747 int rval; 14748 14749 if (minor == DTRACEMNRN_HELPER) 14750 return (dtrace_ioctl_helper(cmd, arg, rv)); 14751 14752 state = ddi_get_soft_state(dtrace_softstate, minor); 14753 14754 if (state->dts_anon) { 14755 ASSERT(dtrace_anon.dta_state == NULL); 14756 state = state->dts_anon; 14757 } 14758 14759 switch (cmd) { 14760 case DTRACEIOC_PROVIDER: { 14761 dtrace_providerdesc_t pvd; 14762 dtrace_provider_t *pvp; 14763 14764 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 14765 return (EFAULT); 14766 14767 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 14768 mutex_enter(&dtrace_provider_lock); 14769 14770 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 14771 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 14772 break; 14773 } 14774 14775 mutex_exit(&dtrace_provider_lock); 14776 14777 if (pvp == NULL) 14778 return (ESRCH); 14779 14780 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 14781 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 14782 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 14783 return (EFAULT); 14784 14785 return (0); 14786 } 14787 14788 case DTRACEIOC_EPROBE: { 14789 dtrace_eprobedesc_t epdesc; 14790 dtrace_ecb_t *ecb; 14791 dtrace_action_t *act; 14792 void *buf; 14793 size_t size; 14794 uintptr_t dest; 14795 int nrecs; 14796 14797 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 14798 return (EFAULT); 14799 14800 mutex_enter(&dtrace_lock); 14801 14802 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 14803 mutex_exit(&dtrace_lock); 14804 return (EINVAL); 14805 } 14806 14807 if (ecb->dte_probe == NULL) { 14808 mutex_exit(&dtrace_lock); 14809 return (EINVAL); 14810 } 14811 14812 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 14813 epdesc.dtepd_uarg = ecb->dte_uarg; 14814 epdesc.dtepd_size = ecb->dte_size; 14815 14816 nrecs = epdesc.dtepd_nrecs; 
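/*
* Remember the caller-supplied record capacity in nrecs; dtepd_nrecs is
* recomputed below to count only the records that will actually be
* copied out (aggregating actions and tuple members are skipped).
*/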
14817 epdesc.dtepd_nrecs = 0; 14818 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14819 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14820 continue; 14821 14822 epdesc.dtepd_nrecs++; 14823 } 14824 14825 /* 14826 * Now that we have the size, we need to allocate a temporary 14827 * buffer in which to store the complete description. We need 14828 * the temporary buffer to be able to drop dtrace_lock() 14829 * across the copyout(), below. 14830 */ 14831 size = sizeof (dtrace_eprobedesc_t) + 14832 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 14833 14834 buf = kmem_alloc(size, KM_SLEEP); 14835 dest = (uintptr_t)buf; 14836 14837 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 14838 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 14839 14840 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14841 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14842 continue; 14843 14844 if (nrecs-- == 0) 14845 break; 14846 14847 bcopy(&act->dta_rec, (void *)dest, 14848 sizeof (dtrace_recdesc_t)); 14849 dest += sizeof (dtrace_recdesc_t); 14850 } 14851 14852 mutex_exit(&dtrace_lock); 14853 14854 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14855 kmem_free(buf, size); 14856 return (EFAULT); 14857 } 14858 14859 kmem_free(buf, size); 14860 return (0); 14861 } 14862 14863 case DTRACEIOC_AGGDESC: { 14864 dtrace_aggdesc_t aggdesc; 14865 dtrace_action_t *act; 14866 dtrace_aggregation_t *agg; 14867 int nrecs; 14868 uint32_t offs; 14869 dtrace_recdesc_t *lrec; 14870 void *buf; 14871 size_t size; 14872 uintptr_t dest; 14873 14874 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 14875 return (EFAULT); 14876 14877 mutex_enter(&dtrace_lock); 14878 14879 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 14880 mutex_exit(&dtrace_lock); 14881 return (EINVAL); 14882 } 14883 14884 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 14885 14886 nrecs = aggdesc.dtagd_nrecs; 14887 aggdesc.dtagd_nrecs = 0; 14888 14889 offs = agg->dtag_base; 14890 lrec = &agg->dtag_action.dta_rec; 14891 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 14892 14893 for (act = agg->dtag_first; ; act = act->dta_next) { 14894 ASSERT(act->dta_intuple || 14895 DTRACEACT_ISAGG(act->dta_kind)); 14896 14897 /* 14898 * If this action has a record size of zero, it 14899 * denotes an argument to the aggregating action. 14900 * Because the presence of this record doesn't (or 14901 * shouldn't) affect the way the data is interpreted, 14902 * we don't copy it out to save user-level the 14903 * confusion of dealing with a zero-length record. 14904 */ 14905 if (act->dta_rec.dtrd_size == 0) { 14906 ASSERT(agg->dtag_hasarg); 14907 continue; 14908 } 14909 14910 aggdesc.dtagd_nrecs++; 14911 14912 if (act == &agg->dtag_action) 14913 break; 14914 } 14915 14916 /* 14917 * Now that we have the size, we need to allocate a temporary 14918 * buffer in which to store the complete description. We need 14919 * the temporary buffer to be able to drop dtrace_lock() 14920 * across the copyout(), below. 
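* (This is the same staging pattern used for DTRACEIOC_EPROBE above:
* the description is assembled in kernel memory first so that
* dtrace_lock need not be held while copyout() touches the user
* buffer.)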
14921 */ 14922 size = sizeof (dtrace_aggdesc_t) + 14923 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 14924 14925 buf = kmem_alloc(size, KM_SLEEP); 14926 dest = (uintptr_t)buf; 14927 14928 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 14929 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 14930 14931 for (act = agg->dtag_first; ; act = act->dta_next) { 14932 dtrace_recdesc_t rec = act->dta_rec; 14933 14934 /* 14935 * See the comment in the above loop for why we pass 14936 * over zero-length records. 14937 */ 14938 if (rec.dtrd_size == 0) { 14939 ASSERT(agg->dtag_hasarg); 14940 continue; 14941 } 14942 14943 if (nrecs-- == 0) 14944 break; 14945 14946 rec.dtrd_offset -= offs; 14947 bcopy(&rec, (void *)dest, sizeof (rec)); 14948 dest += sizeof (dtrace_recdesc_t); 14949 14950 if (act == &agg->dtag_action) 14951 break; 14952 } 14953 14954 mutex_exit(&dtrace_lock); 14955 14956 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14957 kmem_free(buf, size); 14958 return (EFAULT); 14959 } 14960 14961 kmem_free(buf, size); 14962 return (0); 14963 } 14964 14965 case DTRACEIOC_ENABLE: { 14966 dof_hdr_t *dof; 14967 dtrace_enabling_t *enab = NULL; 14968 dtrace_vstate_t *vstate; 14969 int err = 0; 14970 14971 *rv = 0; 14972 14973 /* 14974 * If a NULL argument has been passed, we take this as our 14975 * cue to reevaluate our enablings. 14976 */ 14977 if (arg == NULL) { 14978 dtrace_enabling_matchall(); 14979 14980 return (0); 14981 } 14982 14983 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 14984 return (rval); 14985 14986 mutex_enter(&cpu_lock); 14987 mutex_enter(&dtrace_lock); 14988 vstate = &state->dts_vstate; 14989 14990 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14991 mutex_exit(&dtrace_lock); 14992 mutex_exit(&cpu_lock); 14993 dtrace_dof_destroy(dof); 14994 return (EBUSY); 14995 } 14996 14997 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 14998 mutex_exit(&dtrace_lock); 14999 mutex_exit(&cpu_lock); 15000 dtrace_dof_destroy(dof); 15001 return (EINVAL); 15002 } 15003 15004 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15005 dtrace_enabling_destroy(enab); 15006 mutex_exit(&dtrace_lock); 15007 mutex_exit(&cpu_lock); 15008 dtrace_dof_destroy(dof); 15009 return (rval); 15010 } 15011 15012 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15013 err = dtrace_enabling_retain(enab); 15014 } else { 15015 dtrace_enabling_destroy(enab); 15016 } 15017 15018 mutex_exit(&cpu_lock); 15019 mutex_exit(&dtrace_lock); 15020 dtrace_dof_destroy(dof); 15021 15022 return (err); 15023 } 15024 15025 case DTRACEIOC_REPLICATE: { 15026 dtrace_repldesc_t desc; 15027 dtrace_probedesc_t *match = &desc.dtrpd_match; 15028 dtrace_probedesc_t *create = &desc.dtrpd_create; 15029 int err; 15030 15031 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15032 return (EFAULT); 15033 15034 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15035 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15036 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15037 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15038 15039 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15040 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15041 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15042 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15043 15044 mutex_enter(&dtrace_lock); 15045 err = dtrace_enabling_replicate(state, match, create); 15046 mutex_exit(&dtrace_lock); 15047 15048 return (err); 15049 } 15050 15051 case DTRACEIOC_PROBEMATCH: 15052 case DTRACEIOC_PROBES: { 15053 dtrace_probe_t *probe = 
NULL; 15054 dtrace_probedesc_t desc; 15055 dtrace_probekey_t pkey; 15056 dtrace_id_t i; 15057 int m = 0; 15058 uint32_t priv; 15059 uid_t uid; 15060 zoneid_t zoneid; 15061 15062 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15063 return (EFAULT); 15064 15065 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15066 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15067 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15068 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15069 15070 /* 15071 * Before we attempt to match this probe, we want to give 15072 * all providers the opportunity to provide it. 15073 */ 15074 if (desc.dtpd_id == DTRACE_IDNONE) { 15075 mutex_enter(&dtrace_provider_lock); 15076 dtrace_probe_provide(&desc, NULL); 15077 mutex_exit(&dtrace_provider_lock); 15078 desc.dtpd_id++; 15079 } 15080 15081 if (cmd == DTRACEIOC_PROBEMATCH) { 15082 dtrace_probekey(&desc, &pkey); 15083 pkey.dtpk_id = DTRACE_IDNONE; 15084 } 15085 15086 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15087 15088 mutex_enter(&dtrace_lock); 15089 15090 if (cmd == DTRACEIOC_PROBEMATCH) { 15091 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15092 if ((probe = dtrace_probes[i - 1]) != NULL && 15093 (m = dtrace_match_probe(probe, &pkey, 15094 priv, uid, zoneid)) != 0) 15095 break; 15096 } 15097 15098 if (m < 0) { 15099 mutex_exit(&dtrace_lock); 15100 return (EINVAL); 15101 } 15102 15103 } else { 15104 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15105 if ((probe = dtrace_probes[i - 1]) != NULL && 15106 dtrace_match_priv(probe, priv, uid, zoneid)) 15107 break; 15108 } 15109 } 15110 15111 if (probe == NULL) { 15112 mutex_exit(&dtrace_lock); 15113 return (ESRCH); 15114 } 15115 15116 dtrace_probe_description(probe, &desc); 15117 mutex_exit(&dtrace_lock); 15118 15119 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15120 return (EFAULT); 15121 15122 return (0); 15123 } 15124 15125 case DTRACEIOC_PROBEARG: { 15126 dtrace_argdesc_t desc; 15127 dtrace_probe_t *probe; 15128 dtrace_provider_t *prov; 15129 15130 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15131 return (EFAULT); 15132 15133 if (desc.dtargd_id == DTRACE_IDNONE) 15134 return (EINVAL); 15135 15136 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15137 return (EINVAL); 15138 15139 mutex_enter(&dtrace_provider_lock); 15140 mutex_enter(&mod_lock); 15141 mutex_enter(&dtrace_lock); 15142 15143 if (desc.dtargd_id > dtrace_nprobes) { 15144 mutex_exit(&dtrace_lock); 15145 mutex_exit(&mod_lock); 15146 mutex_exit(&dtrace_provider_lock); 15147 return (EINVAL); 15148 } 15149 15150 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15151 mutex_exit(&dtrace_lock); 15152 mutex_exit(&mod_lock); 15153 mutex_exit(&dtrace_provider_lock); 15154 return (EINVAL); 15155 } 15156 15157 mutex_exit(&dtrace_lock); 15158 15159 prov = probe->dtpr_provider; 15160 15161 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15162 /* 15163 * There isn't any typed information for this probe. 15164 * Set the argument number to DTRACE_ARGNONE. 
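* (Providers that do implement dtps_getargdesc fill in the native and
* translated type strings below; DTRACE_ARGNONE indicates to the
* consumer that no type information is available for this argument.)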
15165 */ 15166 desc.dtargd_ndx = DTRACE_ARGNONE; 15167 } else { 15168 desc.dtargd_native[0] = '\0'; 15169 desc.dtargd_xlate[0] = '\0'; 15170 desc.dtargd_mapping = desc.dtargd_ndx; 15171 15172 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15173 probe->dtpr_id, probe->dtpr_arg, &desc); 15174 } 15175 15176 mutex_exit(&mod_lock); 15177 mutex_exit(&dtrace_provider_lock); 15178 15179 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15180 return (EFAULT); 15181 15182 return (0); 15183 } 15184 15185 case DTRACEIOC_GO: { 15186 processorid_t cpuid; 15187 rval = dtrace_state_go(state, &cpuid); 15188 15189 if (rval != 0) 15190 return (rval); 15191 15192 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15193 return (EFAULT); 15194 15195 return (0); 15196 } 15197 15198 case DTRACEIOC_STOP: { 15199 processorid_t cpuid; 15200 15201 mutex_enter(&dtrace_lock); 15202 rval = dtrace_state_stop(state, &cpuid); 15203 mutex_exit(&dtrace_lock); 15204 15205 if (rval != 0) 15206 return (rval); 15207 15208 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15209 return (EFAULT); 15210 15211 return (0); 15212 } 15213 15214 case DTRACEIOC_DOFGET: { 15215 dof_hdr_t hdr, *dof; 15216 uint64_t len; 15217 15218 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15219 return (EFAULT); 15220 15221 mutex_enter(&dtrace_lock); 15222 dof = dtrace_dof_create(state); 15223 mutex_exit(&dtrace_lock); 15224 15225 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15226 rval = copyout(dof, (void *)arg, len); 15227 dtrace_dof_destroy(dof); 15228 15229 return (rval == 0 ? 0 : EFAULT); 15230 } 15231 15232 case DTRACEIOC_AGGSNAP: 15233 case DTRACEIOC_BUFSNAP: { 15234 dtrace_bufdesc_t desc; 15235 caddr_t cached; 15236 dtrace_buffer_t *buf; 15237 15238 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15239 return (EFAULT); 15240 15241 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 15242 return (EINVAL); 15243 15244 mutex_enter(&dtrace_lock); 15245 15246 if (cmd == DTRACEIOC_BUFSNAP) { 15247 buf = &state->dts_buffer[desc.dtbd_cpu]; 15248 } else { 15249 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 15250 } 15251 15252 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 15253 size_t sz = buf->dtb_offset; 15254 15255 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 15256 mutex_exit(&dtrace_lock); 15257 return (EBUSY); 15258 } 15259 15260 /* 15261 * If this buffer has already been consumed, we're 15262 * going to indicate that there's nothing left here 15263 * to consume. 15264 */ 15265 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 15266 mutex_exit(&dtrace_lock); 15267 15268 desc.dtbd_size = 0; 15269 desc.dtbd_drops = 0; 15270 desc.dtbd_errors = 0; 15271 desc.dtbd_oldest = 0; 15272 sz = sizeof (desc); 15273 15274 if (copyout(&desc, (void *)arg, sz) != 0) 15275 return (EFAULT); 15276 15277 return (0); 15278 } 15279 15280 /* 15281 * If this is a ring buffer that has wrapped, we want 15282 * to copy the whole thing out. 
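* (dtrace_buffer_polish() readies the wrapped buffer for copying, and
* dtbd_oldest -- set below from dtb_xamot_offset -- tells the consumer
* where in the buffer the oldest record begins.)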
15283 */ 15284 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 15285 dtrace_buffer_polish(buf); 15286 sz = buf->dtb_size; 15287 } 15288 15289 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 15290 mutex_exit(&dtrace_lock); 15291 return (EFAULT); 15292 } 15293 15294 desc.dtbd_size = sz; 15295 desc.dtbd_drops = buf->dtb_drops; 15296 desc.dtbd_errors = buf->dtb_errors; 15297 desc.dtbd_oldest = buf->dtb_xamot_offset; 15298 15299 mutex_exit(&dtrace_lock); 15300 15301 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15302 return (EFAULT); 15303 15304 buf->dtb_flags |= DTRACEBUF_CONSUMED; 15305 15306 return (0); 15307 } 15308 15309 if (buf->dtb_tomax == NULL) { 15310 ASSERT(buf->dtb_xamot == NULL); 15311 mutex_exit(&dtrace_lock); 15312 return (ENOENT); 15313 } 15314 15315 cached = buf->dtb_tomax; 15316 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 15317 15318 dtrace_xcall(desc.dtbd_cpu, 15319 (dtrace_xcall_t)dtrace_buffer_switch, buf); 15320 15321 state->dts_errors += buf->dtb_xamot_errors; 15322 15323 /* 15324 * If the buffers did not actually switch, then the cross call 15325 * did not take place -- presumably because the given CPU is 15326 * not in the ready set. If this is the case, we'll return 15327 * ENOENT. 15328 */ 15329 if (buf->dtb_tomax == cached) { 15330 ASSERT(buf->dtb_xamot != cached); 15331 mutex_exit(&dtrace_lock); 15332 return (ENOENT); 15333 } 15334 15335 ASSERT(cached == buf->dtb_xamot); 15336 15337 /* 15338 * We have our snapshot; now copy it out. 15339 */ 15340 if (copyout(buf->dtb_xamot, desc.dtbd_data, 15341 buf->dtb_xamot_offset) != 0) { 15342 mutex_exit(&dtrace_lock); 15343 return (EFAULT); 15344 } 15345 15346 desc.dtbd_size = buf->dtb_xamot_offset; 15347 desc.dtbd_drops = buf->dtb_xamot_drops; 15348 desc.dtbd_errors = buf->dtb_xamot_errors; 15349 desc.dtbd_oldest = 0; 15350 15351 mutex_exit(&dtrace_lock); 15352 15353 /* 15354 * Finally, copy out the buffer description. 15355 */ 15356 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15357 return (EFAULT); 15358 15359 return (0); 15360 } 15361 15362 case DTRACEIOC_CONF: { 15363 dtrace_conf_t conf; 15364 15365 bzero(&conf, sizeof (conf)); 15366 conf.dtc_difversion = DIF_VERSION; 15367 conf.dtc_difintregs = DIF_DIR_NREGS; 15368 conf.dtc_diftupregs = DIF_DTR_NREGS; 15369 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 15370 15371 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 15372 return (EFAULT); 15373 15374 return (0); 15375 } 15376 15377 case DTRACEIOC_STATUS: { 15378 dtrace_status_t stat; 15379 dtrace_dstate_t *dstate; 15380 int i, j; 15381 uint64_t nerrs; 15382 15383 /* 15384 * See the comment in dtrace_state_deadman() for the reason 15385 * for setting dts_laststatus to INT64_MAX before setting 15386 * it to the correct value. 
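* (Briefly: without an atomic 64-bit store on every platform, writing
* INT64_MAX first and interposing the producer barrier keeps the
* deadman from ever observing a torn or seemingly-stale timestamp.)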
15387 */ 15388 state->dts_laststatus = INT64_MAX; 15389 dtrace_membar_producer(); 15390 state->dts_laststatus = dtrace_gethrtime(); 15391 15392 bzero(&stat, sizeof (stat)); 15393 15394 mutex_enter(&dtrace_lock); 15395 15396 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 15397 mutex_exit(&dtrace_lock); 15398 return (ENOENT); 15399 } 15400 15401 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 15402 stat.dtst_exiting = 1; 15403 15404 nerrs = state->dts_errors; 15405 dstate = &state->dts_vstate.dtvs_dynvars; 15406 15407 for (i = 0; i < NCPU; i++) { 15408 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 15409 15410 stat.dtst_dyndrops += dcpu->dtdsc_drops; 15411 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 15412 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 15413 15414 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 15415 stat.dtst_filled++; 15416 15417 nerrs += state->dts_buffer[i].dtb_errors; 15418 15419 for (j = 0; j < state->dts_nspeculations; j++) { 15420 dtrace_speculation_t *spec; 15421 dtrace_buffer_t *buf; 15422 15423 spec = &state->dts_speculations[j]; 15424 buf = &spec->dtsp_buffer[i]; 15425 stat.dtst_specdrops += buf->dtb_xamot_drops; 15426 } 15427 } 15428 15429 stat.dtst_specdrops_busy = state->dts_speculations_busy; 15430 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 15431 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 15432 stat.dtst_dblerrors = state->dts_dblerrors; 15433 stat.dtst_killed = 15434 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 15435 stat.dtst_errors = nerrs; 15436 15437 mutex_exit(&dtrace_lock); 15438 15439 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 15440 return (EFAULT); 15441 15442 return (0); 15443 } 15444 15445 case DTRACEIOC_FORMAT: { 15446 dtrace_fmtdesc_t fmt; 15447 char *str; 15448 int len; 15449 15450 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 15451 return (EFAULT); 15452 15453 mutex_enter(&dtrace_lock); 15454 15455 if (fmt.dtfd_format == 0 || 15456 fmt.dtfd_format > state->dts_nformats) { 15457 mutex_exit(&dtrace_lock); 15458 return (EINVAL); 15459 } 15460 15461 /* 15462 * Format strings are allocated contiguously and they are 15463 * never freed; if a format index is less than the number 15464 * of formats, we can assert that the format map is non-NULL 15465 * and that the format for the specified index is non-NULL. 
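* (The copyout below is a two-step protocol: if the caller's
* dtfd_length is too small to hold the string, only the descriptor is
* copied back with dtfd_length set to the required length, and the
* caller is expected to retry with a larger buffer.)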
15466 */ 15467 ASSERT(state->dts_formats != NULL); 15468 str = state->dts_formats[fmt.dtfd_format - 1]; 15469 ASSERT(str != NULL); 15470 15471 len = strlen(str) + 1; 15472 15473 if (len > fmt.dtfd_length) { 15474 fmt.dtfd_length = len; 15475 15476 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 15477 mutex_exit(&dtrace_lock); 15478 return (EINVAL); 15479 } 15480 } else { 15481 if (copyout(str, fmt.dtfd_string, len) != 0) { 15482 mutex_exit(&dtrace_lock); 15483 return (EINVAL); 15484 } 15485 } 15486 15487 mutex_exit(&dtrace_lock); 15488 return (0); 15489 } 15490 15491 default: 15492 break; 15493 } 15494 15495 return (ENOTTY); 15496 } 15497 15498 /*ARGSUSED*/ 15499 static int 15500 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 15501 { 15502 dtrace_state_t *state; 15503 15504 switch (cmd) { 15505 case DDI_DETACH: 15506 break; 15507 15508 case DDI_SUSPEND: 15509 return (DDI_SUCCESS); 15510 15511 default: 15512 return (DDI_FAILURE); 15513 } 15514 15515 mutex_enter(&cpu_lock); 15516 mutex_enter(&dtrace_provider_lock); 15517 mutex_enter(&dtrace_lock); 15518 15519 ASSERT(dtrace_opens == 0); 15520 15521 if (dtrace_helpers > 0) { 15522 mutex_exit(&dtrace_provider_lock); 15523 mutex_exit(&dtrace_lock); 15524 mutex_exit(&cpu_lock); 15525 return (DDI_FAILURE); 15526 } 15527 15528 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 15529 mutex_exit(&dtrace_provider_lock); 15530 mutex_exit(&dtrace_lock); 15531 mutex_exit(&cpu_lock); 15532 return (DDI_FAILURE); 15533 } 15534 15535 dtrace_provider = NULL; 15536 15537 if ((state = dtrace_anon_grab()) != NULL) { 15538 /* 15539 * If there were ECBs on this state, the provider should 15540 * have not been allowed to detach; assert that there is 15541 * none. 15542 */ 15543 ASSERT(state->dts_necbs == 0); 15544 dtrace_state_destroy(state); 15545 15546 /* 15547 * If we're being detached with anonymous state, we need to 15548 * indicate to the kernel debugger that DTrace is now inactive. 
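* (As in dtrace_close(), deactivation happens only when no consumers
* remain; here there can be none, since dtrace_opens was asserted to be
* zero above.)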
15549 */ 15550 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15551 } 15552 15553 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 15554 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15555 dtrace_cpu_init = NULL; 15556 dtrace_helpers_cleanup = NULL; 15557 dtrace_helpers_fork = NULL; 15558 dtrace_cpustart_init = NULL; 15559 dtrace_cpustart_fini = NULL; 15560 dtrace_debugger_init = NULL; 15561 dtrace_debugger_fini = NULL; 15562 dtrace_modload = NULL; 15563 dtrace_modunload = NULL; 15564 15565 mutex_exit(&cpu_lock); 15566 15567 if (dtrace_helptrace_enabled) { 15568 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 15569 dtrace_helptrace_buffer = NULL; 15570 } 15571 15572 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 15573 dtrace_probes = NULL; 15574 dtrace_nprobes = 0; 15575 15576 dtrace_hash_destroy(dtrace_bymod); 15577 dtrace_hash_destroy(dtrace_byfunc); 15578 dtrace_hash_destroy(dtrace_byname); 15579 dtrace_bymod = NULL; 15580 dtrace_byfunc = NULL; 15581 dtrace_byname = NULL; 15582 15583 kmem_cache_destroy(dtrace_state_cache); 15584 vmem_destroy(dtrace_minor); 15585 vmem_destroy(dtrace_arena); 15586 15587 if (dtrace_toxrange != NULL) { 15588 kmem_free(dtrace_toxrange, 15589 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 15590 dtrace_toxrange = NULL; 15591 dtrace_toxranges = 0; 15592 dtrace_toxranges_max = 0; 15593 } 15594 15595 ddi_remove_minor_node(dtrace_devi, NULL); 15596 dtrace_devi = NULL; 15597 15598 ddi_soft_state_fini(&dtrace_softstate); 15599 15600 ASSERT(dtrace_vtime_references == 0); 15601 ASSERT(dtrace_opens == 0); 15602 ASSERT(dtrace_retained == NULL); 15603 15604 mutex_exit(&dtrace_lock); 15605 mutex_exit(&dtrace_provider_lock); 15606 15607 /* 15608 * We don't destroy the task queue until after we have dropped our 15609 * locks (taskq_destroy() may block on running tasks). To prevent 15610 * attempting to do work after we have effectively detached but before 15611 * the task queue has been destroyed, all tasks dispatched via the 15612 * task queue must check that DTrace is still attached before 15613 * performing any operation. 
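* (dtrace_enabling_matchall(), which dtrace_module_loaded() dispatches
* onto this queue, is the case in point.)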
15614 */ 15615 taskq_destroy(dtrace_taskq); 15616 dtrace_taskq = NULL; 15617 15618 return (DDI_SUCCESS); 15619 } 15620 15621 /*ARGSUSED*/ 15622 static int 15623 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 15624 { 15625 int error; 15626 15627 switch (infocmd) { 15628 case DDI_INFO_DEVT2DEVINFO: 15629 *result = (void *)dtrace_devi; 15630 error = DDI_SUCCESS; 15631 break; 15632 case DDI_INFO_DEVT2INSTANCE: 15633 *result = (void *)0; 15634 error = DDI_SUCCESS; 15635 break; 15636 default: 15637 error = DDI_FAILURE; 15638 } 15639 return (error); 15640 } 15641 15642 static struct cb_ops dtrace_cb_ops = { 15643 dtrace_open, /* open */ 15644 dtrace_close, /* close */ 15645 nulldev, /* strategy */ 15646 nulldev, /* print */ 15647 nodev, /* dump */ 15648 nodev, /* read */ 15649 nodev, /* write */ 15650 dtrace_ioctl, /* ioctl */ 15651 nodev, /* devmap */ 15652 nodev, /* mmap */ 15653 nodev, /* segmap */ 15654 nochpoll, /* poll */ 15655 ddi_prop_op, /* cb_prop_op */ 15656 0, /* streamtab */ 15657 D_NEW | D_MP /* Driver compatibility flag */ 15658 }; 15659 15660 static struct dev_ops dtrace_ops = { 15661 DEVO_REV, /* devo_rev */ 15662 0, /* refcnt */ 15663 dtrace_info, /* get_dev_info */ 15664 nulldev, /* identify */ 15665 nulldev, /* probe */ 15666 dtrace_attach, /* attach */ 15667 dtrace_detach, /* detach */ 15668 nodev, /* reset */ 15669 &dtrace_cb_ops, /* driver operations */ 15670 NULL, /* bus operations */ 15671 nodev, /* dev power */ 15672 ddi_quiesce_not_needed, /* quiesce */ 15673 }; 15674 15675 static struct modldrv modldrv = { 15676 &mod_driverops, /* module type (this is a pseudo driver) */ 15677 "Dynamic Tracing", /* name of module */ 15678 &dtrace_ops, /* driver ops */ 15679 }; 15680 15681 static struct modlinkage modlinkage = { 15682 MODREV_1, 15683 (void *)&modldrv, 15684 NULL 15685 }; 15686 15687 int 15688 _init(void) 15689 { 15690 return (mod_install(&modlinkage)); 15691 } 15692 15693 int 15694 _info(struct modinfo *modinfop) 15695 { 15696 return (mod_info(&modlinkage, modinfop)); 15697 } 15698 15699 int 15700 _fini(void) 15701 { 15702 return (mod_remove(&modlinkage)); 15703 } 15704