1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * DTrace - Dynamic Tracing for Solaris 31 * 32 * This is the implementation of the Solaris Dynamic Tracing framework 33 * (DTrace). The user-visible interface to DTrace is described at length in 34 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 35 * library, the in-kernel DTrace framework, and the DTrace providers are 36 * described in the block comments in the <sys/dtrace.h> header file. The 37 * internal architecture of DTrace is described in the block comments in the 38 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 39 * implementation very much assume mastery of all of these sources; if one has 40 * an unanswered question about the implementation, one should consult them 41 * first. 42 * 43 * The functions here are ordered roughly as follows: 44 * 45 * - Probe context functions 46 * - Probe hashing functions 47 * - Non-probe context utility functions 48 * - Matching functions 49 * - Provider-to-Framework API functions 50 * - Probe management functions 51 * - DIF object functions 52 * - Format functions 53 * - Predicate functions 54 * - ECB functions 55 * - Buffer functions 56 * - Enabling functions 57 * - DOF functions 58 * - Anonymous enabling functions 59 * - Consumer state functions 60 * - Helper functions 61 * - Hook functions 62 * - Driver cookbook functions 63 * 64 * Each group of functions begins with a block comment labelled the "DTrace 65 * [Group] Functions", allowing one to find each block by searching forward 66 * on capital-f functions. 67 */ 68 #include <sys/errno.h> 69 #include <sys/stat.h> 70 #include <sys/modctl.h> 71 #include <sys/conf.h> 72 #include <sys/systm.h> 73 #include <sys/ddi.h> 74 #include <sys/sunddi.h> 75 #include <sys/cpuvar.h> 76 #include <sys/kmem.h> 77 #include <sys/strsubr.h> 78 #include <sys/sysmacros.h> 79 #include <sys/dtrace_impl.h> 80 #include <sys/atomic.h> 81 #include <sys/cmn_err.h> 82 #include <sys/mutex_impl.h> 83 #include <sys/rwlock_impl.h> 84 #include <sys/ctf_api.h> 85 #include <sys/panic.h> 86 #include <sys/priv_impl.h> 87 #include <sys/policy.h> 88 #include <sys/cred_impl.h> 89 #include <sys/procfs_isa.h> 90 #include <sys/taskq.h> 91 #include <sys/mkdev.h> 92 #include <sys/kdi.h> 93 #include <sys/zone.h> 94 #include <sys/socket.h> 95 #include <netinet/in.h> 96 97 /* 98 * DTrace Tunable Variables 99 * 100 * The following variables may be tuned by adding a line to /etc/system that 101 * includes both the name of the DTrace module ("dtrace") and the name of the 102 * variable. 
For example: 103 * 104 * set dtrace:dtrace_destructive_disallow = 1 105 * 106 * In general, the only variables that one should be tuning this way are those 107 * that affect system-wide DTrace behavior, and for which the default behavior 108 * is undesirable. Most of these variables are tunable on a per-consumer 109 * basis using DTrace options, and need not be tuned on a system-wide basis. 110 * When tuning these variables, avoid pathological values; while some attempt 111 * is made to verify the integrity of these variables, they are not considered 112 * part of the supported interface to DTrace, and they are therefore not 113 * checked comprehensively. Further, these variables should not be tuned 114 * dynamically via "mdb -kw" or other means; they should only be tuned via 115 * /etc/system. 116 */ 117 int dtrace_destructive_disallow = 0; 118 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 119 size_t dtrace_difo_maxsize = (256 * 1024); 120 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 121 size_t dtrace_global_maxsize = (16 * 1024); 122 size_t dtrace_actions_max = (16 * 1024); 123 size_t dtrace_retain_max = 1024; 124 dtrace_optval_t dtrace_helper_actions_max = 32; 125 dtrace_optval_t dtrace_helper_providers_max = 32; 126 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 127 size_t dtrace_strsize_default = 256; 128 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 129 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 130 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 131 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 132 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 134 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 135 dtrace_optval_t dtrace_nspec_default = 1; 136 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 137 dtrace_optval_t dtrace_stackframes_default = 20; 138 dtrace_optval_t dtrace_ustackframes_default = 20; 139 dtrace_optval_t dtrace_jstackframes_default = 50; 140 dtrace_optval_t dtrace_jstackstrsize_default = 512; 141 int dtrace_msgdsize_max = 128; 142 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 143 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 144 int dtrace_devdepth_max = 32; 145 int dtrace_err_verbose; 146 hrtime_t dtrace_deadman_interval = NANOSEC; 147 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 148 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 149 150 /* 151 * DTrace External Variables 152 * 153 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 154 * available to DTrace consumers via the backtick (`) syntax. One of these, 155 * dtrace_zero, is made deliberately so: it is provided as a source of 156 * well-known, zero-filled memory. While this variable is not documented, 157 * it is used by some translators as an implementation detail. 
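 *
 * For example, a consumer can read any such kernel variable directly with
 * the backtick syntax; the following one-liner is purely illustrative (any
 * kernel global of known type may be read the same way):
 *
 *	# dtrace -qn 'BEGIN { printf("%d\n", `dtrace_err_verbose); exit(0) }'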
158 */ 159 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 160 161 /* 162 * DTrace Internal Variables 163 */ 164 static dev_info_t *dtrace_devi; /* device info */ 165 static vmem_t *dtrace_arena; /* probe ID arena */ 166 static vmem_t *dtrace_minor; /* minor number arena */ 167 static taskq_t *dtrace_taskq; /* task queue */ 168 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 169 static int dtrace_nprobes; /* number of probes */ 170 static dtrace_provider_t *dtrace_provider; /* provider list */ 171 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 172 static int dtrace_opens; /* number of opens */ 173 static int dtrace_helpers; /* number of helpers */ 174 static void *dtrace_softstate; /* softstate pointer */ 175 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 176 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 177 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 178 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 179 static int dtrace_toxranges; /* number of toxic ranges */ 180 static int dtrace_toxranges_max; /* size of toxic range array */ 181 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 182 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 183 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 184 static kthread_t *dtrace_panicked; /* panicking thread */ 185 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 186 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 187 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 188 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 189 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 190 191 /* 192 * DTrace Locking 193 * DTrace is protected by three (relatively coarse-grained) locks: 194 * 195 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 196 * including enabling state, probes, ECBs, consumer state, helper state, 197 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 198 * probe context is lock-free -- synchronization is handled via the 199 * dtrace_sync() cross call mechanism. 200 * 201 * (2) dtrace_provider_lock is required when manipulating provider state, or 202 * when provider state must be held constant. 203 * 204 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 205 * when meta provider state must be held constant. 206 * 207 * The lock ordering between these three locks is dtrace_meta_lock before 208 * dtrace_provider_lock before dtrace_lock. (In particular, there are 209 * several places where dtrace_provider_lock is held by the framework as it 210 * calls into the providers -- which then call back into the framework, 211 * grabbing dtrace_lock.) 212 * 213 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 214 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 215 * role as a coarse-grained lock; it is acquired before both of these locks. 216 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 217 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 218 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 219 * acquired _between_ dtrace_provider_lock and dtrace_lock. 
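 *
 * Taken together, a hypothetical code path that needed all five of these
 * locks would have to acquire them in the following order (a sketch only;
 * no single path necessarily takes them all):
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);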
220 */ 221 static kmutex_t dtrace_lock; /* probe state lock */ 222 static kmutex_t dtrace_provider_lock; /* provider state lock */ 223 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 224 225 /* 226 * DTrace Provider Variables 227 * 228 * These are the variables relating to DTrace as a provider (that is, the 229 * provider of the BEGIN, END, and ERROR probes). 230 */ 231 static dtrace_pattr_t dtrace_provider_attr = { 232 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 233 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 234 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 235 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 237 }; 238 239 static void 240 dtrace_nullop(void) 241 {} 242 243 static dtrace_pops_t dtrace_provider_ops = { 244 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop, 245 (void (*)(void *, struct modctl *))dtrace_nullop, 246 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 247 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 248 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 249 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 250 NULL, 251 NULL, 252 NULL, 253 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop 254 }; 255 256 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 257 static dtrace_id_t dtrace_probeid_end; /* special END probe */ 258 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 259 260 /* 261 * DTrace Helper Tracing Variables 262 */ 263 uint32_t dtrace_helptrace_next = 0; 264 uint32_t dtrace_helptrace_nlocals; 265 char *dtrace_helptrace_buffer; 266 int dtrace_helptrace_bufsize = 512 * 1024; 267 268 #ifdef DEBUG 269 int dtrace_helptrace_enabled = 1; 270 #else 271 int dtrace_helptrace_enabled = 0; 272 #endif 273 274 /* 275 * DTrace Error Hashing 276 * 277 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash 278 * table. This is very useful for checking coverage of tests that are 279 * expected to induce DIF or DOF processing errors, and may be useful for 280 * debugging problems in the DIF code generator or in DOF generation. The 281 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 282 */ 283 #ifdef DEBUG 284 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 285 static const char *dtrace_errlast; 286 static kthread_t *dtrace_errthread; 287 static kmutex_t dtrace_errlock; 288 #endif 289 290 /* 291 * DTrace Macros and Constants 292 * 293 * These are various macros that are useful in various spots in the 294 * implementation, along with a few random constants that have no meaning 295 * outside of the implementation. There is no real structure to this cpp 296 * mishmash -- but is there ever?
297 */ 298 #define DTRACE_HASHSTR(hash, probe) \ 299 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 300 301 #define DTRACE_HASHNEXT(hash, probe) \ 302 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 303 304 #define DTRACE_HASHPREV(hash, probe) \ 305 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 306 307 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 308 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 309 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 310 311 #define DTRACE_AGGHASHSIZE_SLEW 17 312 313 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 314 315 /* 316 * The key for a thread-local variable consists of the lower 61 bits of the 317 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 318 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 319 * equal to a variable identifier. This is necessary (but not sufficient) to 320 * assure that global associative arrays never collide with thread-local 321 * variables. To guarantee that they cannot collide, we must also define the 322 * order for keying dynamic variables. That order is: 323 * 324 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 325 * 326 * Because the variable-key and the tls-key are in orthogonal spaces, there is 327 * no way for a global variable key signature to match a thread-local key 328 * signature. 329 */ 330 #define DTRACE_TLS_THRKEY(where) { \ 331 uint_t intr = 0; \ 332 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 333 for (; actv; actv >>= 1) \ 334 intr++; \ 335 ASSERT(intr < (1 << 3)); \ 336 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 337 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 338 } 339 340 #define DT_BSWAP_8(x) ((x) & 0xff) 341 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 342 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 343 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 344 345 #define DTRACE_STORE(type, tomax, offset, what) \ 346 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 347 348 #ifndef __i386 349 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 350 if (addr & (size - 1)) { \ 351 *flags |= CPU_DTRACE_BADALIGN; \ 352 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 353 return (0); \ 354 } 355 #else 356 #define DTRACE_ALIGNCHECK(addr, size, flags) 357 #endif 358 359 /* 360 * Test whether a range of memory starting at testaddr of size testsz falls 361 * within the range of memory described by addr, sz. We take care to avoid 362 * problems with overflow and underflow of the unsigned quantities, and 363 * disallow all negative sizes. Ranges of size 0 are allowed. 364 */ 365 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 366 ((testaddr) - (baseaddr) < (basesz) && \ 367 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \ 368 (testaddr) + (testsz) >= (testaddr)) 369 370 /* 371 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 372 * alloc_sz on the righthand side of the comparison in order to avoid overflow 373 * or underflow in the comparison with it. This is simpler than the INRANGE 374 * check above, because we know that the dtms_scratch_ptr is valid in the 375 * range. Allocations of size zero are allowed. 
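 *
 * For example (with purely illustrative values), if dtms_scratch_base is
 * 0x1000, dtms_scratch_size is 0x100 and dtms_scratch_ptr is 0x10c0, then
 * 0x40 bytes of scratch remain: DTRACE_INSCRATCH(mstate, 0x40) is true,
 * while DTRACE_INSCRATCH(mstate, 0x41) is false.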
376 */ 377 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 378 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 379 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 380 381 #define DTRACE_LOADFUNC(bits) \ 382 /*CSTYLED*/ \ 383 uint##bits##_t \ 384 dtrace_load##bits(uintptr_t addr) \ 385 { \ 386 size_t size = bits / NBBY; \ 387 /*CSTYLED*/ \ 388 uint##bits##_t rval; \ 389 int i; \ 390 volatile uint16_t *flags = (volatile uint16_t *) \ 391 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ 392 \ 393 DTRACE_ALIGNCHECK(addr, size, flags); \ 394 \ 395 for (i = 0; i < dtrace_toxranges; i++) { \ 396 if (addr >= dtrace_toxrange[i].dtt_limit) \ 397 continue; \ 398 \ 399 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 400 continue; \ 401 \ 402 /* \ 403 * This address falls within a toxic region; return 0. \ 404 */ \ 405 *flags |= CPU_DTRACE_BADADDR; \ 406 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 407 return (0); \ 408 } \ 409 \ 410 *flags |= CPU_DTRACE_NOFAULT; \ 411 /*CSTYLED*/ \ 412 rval = *((volatile uint##bits##_t *)addr); \ 413 *flags &= ~CPU_DTRACE_NOFAULT; \ 414 \ 415 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 416 } 417 418 #ifdef _LP64 419 #define dtrace_loadptr dtrace_load64 420 #else 421 #define dtrace_loadptr dtrace_load32 422 #endif 423 424 #define DTRACE_DYNHASH_FREE 0 425 #define DTRACE_DYNHASH_SINK 1 426 #define DTRACE_DYNHASH_VALID 2 427 428 #define DTRACE_MATCH_NEXT 0 429 #define DTRACE_MATCH_DONE 1 430 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 431 #define DTRACE_STATE_ALIGN 64 432 433 #define DTRACE_FLAGS2FLT(flags) \ 434 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 435 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 436 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 437 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 438 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 439 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 440 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 441 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 442 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 443 DTRACEFLT_UNKNOWN) 444 445 #define DTRACEACT_ISSTRING(act) \ 446 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 447 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 448 449 static size_t dtrace_strlen(const char *, size_t); 450 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 451 static void dtrace_enabling_provide(dtrace_provider_t *); 452 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 453 static void dtrace_enabling_matchall(void); 454 static dtrace_state_t *dtrace_anon_grab(void); 455 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 456 dtrace_state_t *, uint64_t, uint64_t); 457 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 458 static void dtrace_buffer_drop(dtrace_buffer_t *); 459 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 460 dtrace_state_t *, dtrace_mstate_t *); 461 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 462 dtrace_optval_t); 463 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 464 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 465 466 /* 467 * DTrace Probe Context Functions 468 * 469 * These functions are called from probe context. Because probe context is 470 * any context in which C may be called, arbitrary locks may be held, 471 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
472 * As a result, functions called from probe context may only call other DTrace 473 * support functions -- they may not interact at all with the system at large. 474 * (Note that the ASSERT macro is made probe-context safe by redefining it in 475 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 476 * loads are to be performed from probe context, they _must_ be in terms of 477 * the safe dtrace_load*() variants. 478 * 479 * Some functions in this block are not actually called from probe context; 480 * for these functions, there will be a comment above the function reading 481 * "Note: not called from probe context." 482 */ 483 void 484 dtrace_panic(const char *format, ...) 485 { 486 va_list alist; 487 488 va_start(alist, format); 489 dtrace_vpanic(format, alist); 490 va_end(alist); 491 } 492 493 int 494 dtrace_assfail(const char *a, const char *f, int l) 495 { 496 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 497 498 /* 499 * We just need something here that even the most clever compiler 500 * cannot optimize away. 501 */ 502 return (a[(uintptr_t)f]); 503 } 504 505 /* 506 * Atomically increment a specified error counter from probe context. 507 */ 508 static void 509 dtrace_error(uint32_t *counter) 510 { 511 /* 512 * Most counters stored to in probe context are per-CPU counters. 513 * However, there are some error conditions that are sufficiently 514 * arcane that they don't merit per-CPU storage. If these counters 515 * are incremented concurrently on different CPUs, scalability will be 516 * adversely affected -- but we don't expect them to be white-hot in a 517 * correctly constructed enabling... 518 */ 519 uint32_t oval, nval; 520 521 do { 522 oval = *counter; 523 524 if ((nval = oval + 1) == 0) { 525 /* 526 * If the counter would wrap, set it to 1 -- assuring 527 * that the counter is never zero when we have seen 528 * errors. (The counter must be 32-bits because we 529 * aren't guaranteed a 64-bit compare&swap operation.) 530 * To save this code both the infamy of being fingered 531 * by a priggish news story and the indignity of being 532 * the target of a neo-puritan witch trial, we're 533 * carefully avoiding any colorful description of the 534 * likelihood of this condition -- but suffice it to 535 * say that it is only slightly more likely than the 536 * overflow of predicate cache IDs, as discussed in 537 * dtrace_predicate_create(). 538 */ 539 nval = 1; 540 } 541 } while (dtrace_cas32(counter, oval, nval) != oval); 542 } 543 544 /* 545 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 546 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 
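 *
 * For example, DTRACE_LOADFUNC(16) below expands, roughly, to:
 *
 *	uint16_t
 *	dtrace_load16(uintptr_t addr)
 *	{
 *		size_t size = 16 / NBBY;
 *		uint16_t rval;
 *		...
 *		DTRACE_ALIGNCHECK(addr, size, flags);
 *		... toxic range checks ...
 *		*flags |= CPU_DTRACE_NOFAULT;
 *		rval = *((volatile uint16_t *)addr);
 *		*flags &= ~CPU_DTRACE_NOFAULT;
 *		return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);
 *	}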
547 */ 548 DTRACE_LOADFUNC(8) 549 DTRACE_LOADFUNC(16) 550 DTRACE_LOADFUNC(32) 551 DTRACE_LOADFUNC(64) 552 553 static int 554 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 555 { 556 if (dest < mstate->dtms_scratch_base) 557 return (0); 558 559 if (dest + size < dest) 560 return (0); 561 562 if (dest + size > mstate->dtms_scratch_ptr) 563 return (0); 564 565 return (1); 566 } 567 568 static int 569 dtrace_canstore_statvar(uint64_t addr, size_t sz, 570 dtrace_statvar_t **svars, int nsvars) 571 { 572 int i; 573 574 for (i = 0; i < nsvars; i++) { 575 dtrace_statvar_t *svar = svars[i]; 576 577 if (svar == NULL || svar->dtsv_size == 0) 578 continue; 579 580 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 581 return (1); 582 } 583 584 return (0); 585 } 586 587 /* 588 * Check to see if the address is within a memory region to which a store may 589 * be issued. This includes the DTrace scratch areas, and any DTrace variable 590 * region. The caller of dtrace_canstore() is responsible for performing any 591 * alignment checks that are needed before stores are actually executed. 592 */ 593 static int 594 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 595 dtrace_vstate_t *vstate) 596 { 597 /* 598 * First, check to see if the address is in scratch space... 599 */ 600 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 601 mstate->dtms_scratch_size)) 602 return (1); 603 604 /* 605 * Now check to see if it's a dynamic variable. This check will pick 606 * up both thread-local variables and any global dynamically-allocated 607 * variables. 608 */ 609 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 610 vstate->dtvs_dynvars.dtds_size)) 611 return (1); 612 613 /* 614 * Finally, check the static local and global variables. These checks 615 * take the longest, so we perform them last. 616 */ 617 if (dtrace_canstore_statvar(addr, sz, 618 vstate->dtvs_locals, vstate->dtvs_nlocals)) 619 return (1); 620 621 if (dtrace_canstore_statvar(addr, sz, 622 vstate->dtvs_globals, vstate->dtvs_nglobals)) 623 return (1); 624 625 return (0); 626 } 627 628 629 /* 630 * Convenience routine to check to see if the address is within a memory 631 * region in which a load may be issued given the user's privilege level; 632 * if not, it sets the appropriate error flags and loads 'addr' into the 633 * illegal value slot. 634 * 635 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 636 * appropriate memory access protection. 637 */ 638 static int 639 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 640 dtrace_vstate_t *vstate) 641 { 642 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 643 644 /* 645 * If we hold the privilege to read from kernel memory, then 646 * everything is readable. 647 */ 648 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 649 return (1); 650 651 /* 652 * You can obviously read that which you can store. 653 */ 654 if (dtrace_canstore(addr, sz, mstate, vstate)) 655 return (1); 656 657 /* 658 * We're allowed to read from our own string table. 
659 */ 660 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 661 mstate->dtms_difo->dtdo_strlen)) 662 return (1); 663 664 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 665 *illval = addr; 666 return (0); 667 } 668 669 /* 670 * Convenience routine to check to see if a given string is within a memory 671 * region in which a load may be issued given the user's privilege level; 672 * this exists so that we don't need to issue unnecessary dtrace_strlen() 673 * calls in the event that the user has all privileges. 674 */ 675 static int 676 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 677 dtrace_vstate_t *vstate) 678 { 679 size_t strsz; 680 681 /* 682 * If we hold the privilege to read from kernel memory, then 683 * everything is readable. 684 */ 685 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 686 return (1); 687 688 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 689 if (dtrace_canload(addr, strsz, mstate, vstate)) 690 return (1); 691 692 return (0); 693 } 694 695 /* 696 * Convenience routine to check to see if a given variable is within a memory 697 * region in which a load may be issued given the user's privilege level. 698 */ 699 static int 700 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 701 dtrace_vstate_t *vstate) 702 { 703 size_t sz; 704 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 705 706 /* 707 * If we hold the privilege to read from kernel memory, then 708 * everything is readable. 709 */ 710 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 711 return (1); 712 713 if (type->dtdt_kind == DIF_TYPE_STRING) 714 sz = dtrace_strlen(src, 715 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 716 else 717 sz = type->dtdt_size; 718 719 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 720 } 721 722 /* 723 * Compare two strings using safe loads. 724 */ 725 static int 726 dtrace_strncmp(char *s1, char *s2, size_t limit) 727 { 728 uint8_t c1, c2; 729 volatile uint16_t *flags; 730 731 if (s1 == s2 || limit == 0) 732 return (0); 733 734 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 735 736 do { 737 if (s1 == NULL) { 738 c1 = '\0'; 739 } else { 740 c1 = dtrace_load8((uintptr_t)s1++); 741 } 742 743 if (s2 == NULL) { 744 c2 = '\0'; 745 } else { 746 c2 = dtrace_load8((uintptr_t)s2++); 747 } 748 749 if (c1 != c2) 750 return (c1 - c2); 751 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 752 753 return (0); 754 } 755 756 /* 757 * Compute strlen(s) for a string using safe memory accesses. The additional 758 * len parameter is used to specify a maximum length to ensure completion. 759 */ 760 static size_t 761 dtrace_strlen(const char *s, size_t lim) 762 { 763 uint_t len; 764 765 for (len = 0; len != lim; len++) { 766 if (dtrace_load8((uintptr_t)s++) == '\0') 767 break; 768 } 769 770 return (len); 771 } 772 773 /* 774 * Check if an address falls within a toxic region. 
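 * The overlap tests below rely on unsigned wraparound: "kaddr - taddr <
 * tsize" holds only when kaddr lies in [taddr, taddr + tsize), and
 * "taddr - kaddr < size" holds only when taddr lies in [kaddr, kaddr + size).
 * As a purely illustrative example, given a toxic range with taddr = 0x2000
 * and tsize = 0x100, a load of size 0x20 at kaddr = 0x1ff0 is caught by the
 * second test (0x2000 - 0x1ff0 == 0x10 < 0x20), while one at kaddr = 0x1f00
 * fails both tests and is permitted.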
775 */ 776 static int 777 dtrace_istoxic(uintptr_t kaddr, size_t size) 778 { 779 uintptr_t taddr, tsize; 780 int i; 781 782 for (i = 0; i < dtrace_toxranges; i++) { 783 taddr = dtrace_toxrange[i].dtt_base; 784 tsize = dtrace_toxrange[i].dtt_limit - taddr; 785 786 if (kaddr - taddr < tsize) { 787 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 788 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr; 789 return (1); 790 } 791 792 if (taddr - kaddr < size) { 793 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 794 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr; 795 return (1); 796 } 797 } 798 799 return (0); 800 } 801 802 /* 803 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 804 * memory specified by the DIF program. The dst is assumed to be safe memory 805 * that we can store to directly because it is managed by DTrace. As with 806 * standard bcopy, overlapping copies are handled properly. 807 */ 808 static void 809 dtrace_bcopy(const void *src, void *dst, size_t len) 810 { 811 if (len != 0) { 812 uint8_t *s1 = dst; 813 const uint8_t *s2 = src; 814 815 if (s1 <= s2) { 816 do { 817 *s1++ = dtrace_load8((uintptr_t)s2++); 818 } while (--len != 0); 819 } else { 820 s2 += len; 821 s1 += len; 822 823 do { 824 *--s1 = dtrace_load8((uintptr_t)--s2); 825 } while (--len != 0); 826 } 827 } 828 } 829 830 /* 831 * Copy src to dst using safe memory accesses, up to either the specified 832 * length, or the point that a nul byte is encountered. The src is assumed to 833 * be unsafe memory specified by the DIF program. The dst is assumed to be 834 * safe memory that we can store to directly because it is managed by DTrace. 835 * Unlike dtrace_bcopy(), overlapping regions are not handled. 836 */ 837 static void 838 dtrace_strcpy(const void *src, void *dst, size_t len) 839 { 840 if (len != 0) { 841 uint8_t *s1 = dst, c; 842 const uint8_t *s2 = src; 843 844 do { 845 *s1++ = c = dtrace_load8((uintptr_t)s2++); 846 } while (--len != 0 && c != '\0'); 847 } 848 } 849 850 /* 851 * Copy src to dst, deriving the size and type from the specified (BYREF) 852 * variable type. The src is assumed to be unsafe memory specified by the DIF 853 * program. The dst is assumed to be DTrace variable memory that is of the 854 * specified type; we assume that we can store to directly. 855 */ 856 static void 857 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 858 { 859 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 860 861 if (type->dtdt_kind == DIF_TYPE_STRING) { 862 dtrace_strcpy(src, dst, type->dtdt_size); 863 } else { 864 dtrace_bcopy(src, dst, type->dtdt_size); 865 } 866 } 867 868 /* 869 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 870 * unsafe memory specified by the DIF program. The s2 data is assumed to be 871 * safe memory that we can access directly because it is managed by DTrace. 872 */ 873 static int 874 dtrace_bcmp(const void *s1, const void *s2, size_t len) 875 { 876 volatile uint16_t *flags; 877 878 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 879 880 if (s1 == s2) 881 return (0); 882 883 if (s1 == NULL || s2 == NULL) 884 return (1); 885 886 if (s1 != s2 && len != 0) { 887 const uint8_t *ps1 = s1; 888 const uint8_t *ps2 = s2; 889 890 do { 891 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 892 return (1); 893 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 894 } 895 return (0); 896 } 897 898 /* 899 * Zero the specified region using a simple byte-by-byte loop. Note that this 900 * is for safe DTrace-managed memory only. 
901 */ 902 static void 903 dtrace_bzero(void *dst, size_t len) 904 { 905 uchar_t *cp; 906 907 for (cp = dst; len != 0; len--) 908 *cp++ = 0; 909 } 910 911 /* 912 * This privilege check should be used by actions and subroutines to 913 * verify that the user credentials of the process that enabled the 914 * invoking ECB match the target credentials 915 */ 916 static int 917 dtrace_priv_proc_common_user(dtrace_state_t *state) 918 { 919 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 920 921 /* 922 * We should always have a non-NULL state cred here, since if cred 923 * is null (anonymous tracing), we fast-path bypass this routine. 924 */ 925 ASSERT(s_cr != NULL); 926 927 if ((cr = CRED()) != NULL && 928 s_cr->cr_uid == cr->cr_uid && 929 s_cr->cr_uid == cr->cr_ruid && 930 s_cr->cr_uid == cr->cr_suid && 931 s_cr->cr_gid == cr->cr_gid && 932 s_cr->cr_gid == cr->cr_rgid && 933 s_cr->cr_gid == cr->cr_sgid) 934 return (1); 935 936 return (0); 937 } 938 939 /* 940 * This privilege check should be used by actions and subroutines to 941 * verify that the zone of the process that enabled the invoking ECB 942 * matches the target credentials 943 */ 944 static int 945 dtrace_priv_proc_common_zone(dtrace_state_t *state) 946 { 947 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 948 949 /* 950 * We should always have a non-NULL state cred here, since if cred 951 * is null (anonymous tracing), we fast-path bypass this routine. 952 */ 953 ASSERT(s_cr != NULL); 954 955 if ((cr = CRED()) != NULL && 956 s_cr->cr_zone == cr->cr_zone) 957 return (1); 958 959 return (0); 960 } 961 962 /* 963 * This privilege check should be used by actions and subroutines to 964 * verify that the process has not setuid or changed credentials. 965 */ 966 static int 967 dtrace_priv_proc_common_nocd() 968 { 969 proc_t *proc; 970 971 if ((proc = ttoproc(curthread)) != NULL && 972 !(proc->p_flag & SNOCD)) 973 return (1); 974 975 return (0); 976 } 977 978 static int 979 dtrace_priv_proc_destructive(dtrace_state_t *state) 980 { 981 int action = state->dts_cred.dcr_action; 982 983 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 984 dtrace_priv_proc_common_zone(state) == 0) 985 goto bad; 986 987 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 988 dtrace_priv_proc_common_user(state) == 0) 989 goto bad; 990 991 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 992 dtrace_priv_proc_common_nocd() == 0) 993 goto bad; 994 995 return (1); 996 997 bad: 998 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 999 1000 return (0); 1001 } 1002 1003 static int 1004 dtrace_priv_proc_control(dtrace_state_t *state) 1005 { 1006 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1007 return (1); 1008 1009 if (dtrace_priv_proc_common_zone(state) && 1010 dtrace_priv_proc_common_user(state) && 1011 dtrace_priv_proc_common_nocd()) 1012 return (1); 1013 1014 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1015 1016 return (0); 1017 } 1018 1019 static int 1020 dtrace_priv_proc(dtrace_state_t *state) 1021 { 1022 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1023 return (1); 1024 1025 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1026 1027 return (0); 1028 } 1029 1030 static int 1031 dtrace_priv_kernel(dtrace_state_t *state) 1032 { 1033 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1034 return (1); 1035 1036 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1037 1038 return (0); 1039 } 1040 1041 static int 1042 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1043 { 
1044 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1045 return (1); 1046 1047 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1048 1049 return (0); 1050 } 1051 1052 /* 1053 * Note: not called from probe context. This function is called 1054 * asynchronously (and at a regular interval) from outside of probe context to 1055 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1056 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1057 */ 1058 void 1059 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1060 { 1061 dtrace_dynvar_t *dirty; 1062 dtrace_dstate_percpu_t *dcpu; 1063 int i, work = 0; 1064 1065 for (i = 0; i < NCPU; i++) { 1066 dcpu = &dstate->dtds_percpu[i]; 1067 1068 ASSERT(dcpu->dtdsc_rinsing == NULL); 1069 1070 /* 1071 * If the dirty list is NULL, there is no dirty work to do. 1072 */ 1073 if (dcpu->dtdsc_dirty == NULL) 1074 continue; 1075 1076 /* 1077 * If the clean list is non-NULL, then we're not going to do 1078 * any work for this CPU -- it means that there has not been 1079 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 1080 * since the last time we cleaned house. 1081 */ 1082 if (dcpu->dtdsc_clean != NULL) 1083 continue; 1084 1085 work = 1; 1086 1087 /* 1088 * Atomically move the dirty list aside. 1089 */ 1090 do { 1091 dirty = dcpu->dtdsc_dirty; 1092 1093 /* 1094 * Before we zap the dirty list, set the rinsing list. 1095 * (This allows for a potential assertion in 1096 * dtrace_dynvar(): if a free dynamic variable appears 1097 * on a hash chain, either the dirty list or the 1098 * rinsing list for some CPU must be non-NULL.) 1099 */ 1100 dcpu->dtdsc_rinsing = dirty; 1101 dtrace_membar_producer(); 1102 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1103 dirty, NULL) != dirty); 1104 } 1105 1106 if (!work) { 1107 /* 1108 * We have no work to do; we can simply return. 1109 */ 1110 return; 1111 } 1112 1113 dtrace_sync(); 1114 1115 for (i = 0; i < NCPU; i++) { 1116 dcpu = &dstate->dtds_percpu[i]; 1117 1118 if (dcpu->dtdsc_rinsing == NULL) 1119 continue; 1120 1121 /* 1122 * We are now guaranteed that no hash chain contains a pointer 1123 * into this dirty list; we can make it clean. 1124 */ 1125 ASSERT(dcpu->dtdsc_clean == NULL); 1126 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1127 dcpu->dtdsc_rinsing = NULL; 1128 } 1129 1130 /* 1131 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1132 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1133 * This prevents a race whereby a CPU incorrectly decides that 1134 * the state should be something other than DTRACE_DSTATE_CLEAN 1135 * after dtrace_dynvar_clean() has completed. 1136 */ 1137 dtrace_sync(); 1138 1139 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1140 } 1141 1142 /* 1143 * Depending on the value of the op parameter, this function looks-up, 1144 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1145 * allocation is requested, this function will return a pointer to a 1146 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1147 * variable can be allocated. If NULL is returned, the appropriate counter 1148 * will be incremented. 
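 *
 * As a purely hypothetical sketch of the calling convention (key[0] carrying
 * the variable-key and key[1] the tls-key, per the key ordering described
 * above), a caller allocating a thread-local variable with identifier "id"
 * might do:
 *
 *	dtrace_key_t key[2];
 *
 *	key[0].dttk_value = id;
 *	key[0].dttk_size = 0;
 *	DTRACE_TLS_THRKEY(key[1].dttk_value);
 *	key[1].dttk_size = 0;
 *
 *	dvar = dtrace_dynvar(dstate, 2, key, dsize,
 *	    DTRACE_DYNVAR_ALLOC, mstate, vstate);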
1149 */ 1150 dtrace_dynvar_t * 1151 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1152 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1153 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1154 { 1155 uint64_t hashval = DTRACE_DYNHASH_VALID; 1156 dtrace_dynhash_t *hash = dstate->dtds_hash; 1157 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1158 processorid_t me = CPU->cpu_id, cpu = me; 1159 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1160 size_t bucket, ksize; 1161 size_t chunksize = dstate->dtds_chunksize; 1162 uintptr_t kdata, lock, nstate; 1163 uint_t i; 1164 1165 ASSERT(nkeys != 0); 1166 1167 /* 1168 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1169 * algorithm. For the by-value portions, we perform the algorithm in 1170 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1171 * bit, and seems to have only a minute effect on distribution. For 1172 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1173 * over each referenced byte. It's painful to do this, but it's much 1174 * better than pathological hash distribution. The efficacy of the 1175 * hashing algorithm (and a comparison with other algorithms) may be 1176 * found by running the ::dtrace_dynstat MDB dcmd. 1177 */ 1178 for (i = 0; i < nkeys; i++) { 1179 if (key[i].dttk_size == 0) { 1180 uint64_t val = key[i].dttk_value; 1181 1182 hashval += (val >> 48) & 0xffff; 1183 hashval += (hashval << 10); 1184 hashval ^= (hashval >> 6); 1185 1186 hashval += (val >> 32) & 0xffff; 1187 hashval += (hashval << 10); 1188 hashval ^= (hashval >> 6); 1189 1190 hashval += (val >> 16) & 0xffff; 1191 hashval += (hashval << 10); 1192 hashval ^= (hashval >> 6); 1193 1194 hashval += val & 0xffff; 1195 hashval += (hashval << 10); 1196 hashval ^= (hashval >> 6); 1197 } else { 1198 /* 1199 * This is incredibly painful, but it beats the hell 1200 * out of the alternative. 1201 */ 1202 uint64_t j, size = key[i].dttk_size; 1203 uintptr_t base = (uintptr_t)key[i].dttk_value; 1204 1205 if (!dtrace_canload(base, size, mstate, vstate)) 1206 break; 1207 1208 for (j = 0; j < size; j++) { 1209 hashval += dtrace_load8(base + j); 1210 hashval += (hashval << 10); 1211 hashval ^= (hashval >> 6); 1212 } 1213 } 1214 } 1215 1216 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1217 return (NULL); 1218 1219 hashval += (hashval << 3); 1220 hashval ^= (hashval >> 11); 1221 hashval += (hashval << 15); 1222 1223 /* 1224 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1225 * comes out to be one of our two sentinel hash values. If this 1226 * actually happens, we set the hashval to be a value known to be a 1227 * non-sentinel value. 1228 */ 1229 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1230 hashval = DTRACE_DYNHASH_VALID; 1231 1232 /* 1233 * Yes, it's painful to do a divide here. If the cycle count becomes 1234 * important here, tricks can be pulled to reduce it. (However, it's 1235 * critical that hash collisions be kept to an absolute minimum; 1236 * they're much more painful than a divide.) It's better to have a 1237 * solution that generates few collisions and still keeps things 1238 * relatively simple. 
1239 */ 1240 bucket = hashval % dstate->dtds_hashsize; 1241 1242 if (op == DTRACE_DYNVAR_DEALLOC) { 1243 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1244 1245 for (;;) { 1246 while ((lock = *lockp) & 1) 1247 continue; 1248 1249 if (dtrace_casptr((void *)lockp, 1250 (void *)lock, (void *)(lock + 1)) == (void *)lock) 1251 break; 1252 } 1253 1254 dtrace_membar_producer(); 1255 } 1256 1257 top: 1258 prev = NULL; 1259 lock = hash[bucket].dtdh_lock; 1260 1261 dtrace_membar_consumer(); 1262 1263 start = hash[bucket].dtdh_chain; 1264 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1265 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1266 op != DTRACE_DYNVAR_DEALLOC)); 1267 1268 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1269 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1270 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1271 1272 if (dvar->dtdv_hashval != hashval) { 1273 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1274 /* 1275 * We've reached the sink, and therefore the 1276 * end of the hash chain; we can kick out of 1277 * the loop knowing that we have seen a valid 1278 * snapshot of state. 1279 */ 1280 ASSERT(dvar->dtdv_next == NULL); 1281 ASSERT(dvar == &dtrace_dynhash_sink); 1282 break; 1283 } 1284 1285 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1286 /* 1287 * We've gone off the rails: somewhere along 1288 * the line, one of the members of this hash 1289 * chain was deleted. Note that we could also 1290 * detect this by simply letting this loop run 1291 * to completion, as we would eventually hit 1292 * the end of the dirty list. However, we 1293 * want to avoid running the length of the 1294 * dirty list unnecessarily (it might be quite 1295 * long), so we catch this as early as 1296 * possible by detecting the hash marker. In 1297 * this case, we simply set dvar to NULL and 1298 * break; the conditional after the loop will 1299 * send us back to top. 1300 */ 1301 dvar = NULL; 1302 break; 1303 } 1304 1305 goto next; 1306 } 1307 1308 if (dtuple->dtt_nkeys != nkeys) 1309 goto next; 1310 1311 for (i = 0; i < nkeys; i++, dkey++) { 1312 if (dkey->dttk_size != key[i].dttk_size) 1313 goto next; /* size or type mismatch */ 1314 1315 if (dkey->dttk_size != 0) { 1316 if (dtrace_bcmp( 1317 (void *)(uintptr_t)key[i].dttk_value, 1318 (void *)(uintptr_t)dkey->dttk_value, 1319 dkey->dttk_size)) 1320 goto next; 1321 } else { 1322 if (dkey->dttk_value != key[i].dttk_value) 1323 goto next; 1324 } 1325 } 1326 1327 if (op != DTRACE_DYNVAR_DEALLOC) 1328 return (dvar); 1329 1330 ASSERT(dvar->dtdv_next == NULL || 1331 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1332 1333 if (prev != NULL) { 1334 ASSERT(hash[bucket].dtdh_chain != dvar); 1335 ASSERT(start != dvar); 1336 ASSERT(prev->dtdv_next == dvar); 1337 prev->dtdv_next = dvar->dtdv_next; 1338 } else { 1339 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1340 start, dvar->dtdv_next) != start) { 1341 /* 1342 * We have failed to atomically swing the 1343 * hash table head pointer, presumably because 1344 * of a conflicting allocation on another CPU. 1345 * We need to reread the hash chain and try 1346 * again. 1347 */ 1348 goto top; 1349 } 1350 } 1351 1352 dtrace_membar_producer(); 1353 1354 /* 1355 * Now set the hash value to indicate that it's free. 
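 *
 * (A note on synchronization: the bucket's dtdh_lock word, acquired above
 * and released below, behaves like a simple sequence lock.  Its value is
 * even when the bucket is unlocked and odd while a deallocation holds it:
 * a deallocation acquires it by compare-and-swapping lock to lock + 1 once
 * the low bit is clear, and releases it with another increment.  Lookups
 * never take the lock; they sample dtdh_lock before walking the chain and
 * retry from "top" if the value has changed.  For example, a lookup that
 * sampled the value 6 and later observes 7 or 8 simply starts over.)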
1356 */ 1357 ASSERT(hash[bucket].dtdh_chain != dvar); 1358 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1359 1360 dtrace_membar_producer(); 1361 1362 /* 1363 * Set the next pointer to point at the dirty list, and 1364 * atomically swing the dirty pointer to the newly freed dvar. 1365 */ 1366 do { 1367 next = dcpu->dtdsc_dirty; 1368 dvar->dtdv_next = next; 1369 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1370 1371 /* 1372 * Finally, unlock this hash bucket. 1373 */ 1374 ASSERT(hash[bucket].dtdh_lock == lock); 1375 ASSERT(lock & 1); 1376 hash[bucket].dtdh_lock++; 1377 1378 return (NULL); 1379 next: 1380 prev = dvar; 1381 continue; 1382 } 1383 1384 if (dvar == NULL) { 1385 /* 1386 * If dvar is NULL, it is because we went off the rails: 1387 * one of the elements that we traversed in the hash chain 1388 * was deleted while we were traversing it. In this case, 1389 * we assert that we aren't doing a dealloc (deallocs lock 1390 * the hash bucket to prevent themselves from racing with 1391 * one another), and retry the hash chain traversal. 1392 */ 1393 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1394 goto top; 1395 } 1396 1397 if (op != DTRACE_DYNVAR_ALLOC) { 1398 /* 1399 * If we are not to allocate a new variable, we want to 1400 * return NULL now. Before we return, check that the value 1401 * of the lock word hasn't changed. If it has, we may have 1402 * seen an inconsistent snapshot. 1403 */ 1404 if (op == DTRACE_DYNVAR_NOALLOC) { 1405 if (hash[bucket].dtdh_lock != lock) 1406 goto top; 1407 } else { 1408 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1409 ASSERT(hash[bucket].dtdh_lock == lock); 1410 ASSERT(lock & 1); 1411 hash[bucket].dtdh_lock++; 1412 } 1413 1414 return (NULL); 1415 } 1416 1417 /* 1418 * We need to allocate a new dynamic variable. The size we need is the 1419 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1420 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1421 * the size of any referred-to data (dsize). We then round the final 1422 * size up to the chunksize for allocation. 1423 */ 1424 for (ksize = 0, i = 0; i < nkeys; i++) 1425 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1426 1427 /* 1428 * This should be pretty much impossible, but could happen if, say, 1429 * strange DIF specified the tuple. Ideally, this should be an 1430 * assertion and not an error condition -- but that requires that the 1431 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1432 * bullet-proof. (That is, it must not be able to be fooled by 1433 * malicious DIF.) Given the lack of backwards branches in DIF, 1434 * solving this would presumably not amount to solving the Halting 1435 * Problem -- but it still seems awfully hard. 1436 */ 1437 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1438 ksize + dsize > chunksize) { 1439 dcpu->dtdsc_drops++; 1440 return (NULL); 1441 } 1442 1443 nstate = DTRACE_DSTATE_EMPTY; 1444 1445 do { 1446 retry: 1447 free = dcpu->dtdsc_free; 1448 1449 if (free == NULL) { 1450 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1451 void *rval; 1452 1453 if (clean == NULL) { 1454 /* 1455 * We're out of dynamic variable space on 1456 * this CPU. Unless we have tried all CPUs, 1457 * we'll try to allocate from a different 1458 * CPU. 
1459 */ 1460 switch (dstate->dtds_state) { 1461 case DTRACE_DSTATE_CLEAN: { 1462 void *sp = &dstate->dtds_state; 1463 1464 if (++cpu >= NCPU) 1465 cpu = 0; 1466 1467 if (dcpu->dtdsc_dirty != NULL && 1468 nstate == DTRACE_DSTATE_EMPTY) 1469 nstate = DTRACE_DSTATE_DIRTY; 1470 1471 if (dcpu->dtdsc_rinsing != NULL) 1472 nstate = DTRACE_DSTATE_RINSING; 1473 1474 dcpu = &dstate->dtds_percpu[cpu]; 1475 1476 if (cpu != me) 1477 goto retry; 1478 1479 (void) dtrace_cas32(sp, 1480 DTRACE_DSTATE_CLEAN, nstate); 1481 1482 /* 1483 * To increment the correct bean 1484 * counter, take another lap. 1485 */ 1486 goto retry; 1487 } 1488 1489 case DTRACE_DSTATE_DIRTY: 1490 dcpu->dtdsc_dirty_drops++; 1491 break; 1492 1493 case DTRACE_DSTATE_RINSING: 1494 dcpu->dtdsc_rinsing_drops++; 1495 break; 1496 1497 case DTRACE_DSTATE_EMPTY: 1498 dcpu->dtdsc_drops++; 1499 break; 1500 } 1501 1502 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1503 return (NULL); 1504 } 1505 1506 /* 1507 * The clean list appears to be non-empty. We want to 1508 * move the clean list to the free list; we start by 1509 * moving the clean pointer aside. 1510 */ 1511 if (dtrace_casptr(&dcpu->dtdsc_clean, 1512 clean, NULL) != clean) { 1513 /* 1514 * We are in one of two situations: 1515 * 1516 * (a) The clean list was switched to the 1517 * free list by another CPU. 1518 * 1519 * (b) The clean list was added to by the 1520 * cleansing cyclic. 1521 * 1522 * In either of these situations, we can 1523 * just reattempt the free list allocation. 1524 */ 1525 goto retry; 1526 } 1527 1528 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1529 1530 /* 1531 * Now we'll move the clean list to the free list. 1532 * It's impossible for this to fail: the only way 1533 * the free list can be updated is through this 1534 * code path, and only one CPU can own the clean list. 1535 * Thus, it would only be possible for this to fail if 1536 * this code were racing with dtrace_dynvar_clean(). 1537 * (That is, if dtrace_dynvar_clean() updated the clean 1538 * list, and we ended up racing to update the free 1539 * list.) This race is prevented by the dtrace_sync() 1540 * in dtrace_dynvar_clean() -- which flushes the 1541 * owners of the clean lists out before resetting 1542 * the clean lists. 1543 */ 1544 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1545 ASSERT(rval == NULL); 1546 goto retry; 1547 } 1548 1549 dvar = free; 1550 new_free = dvar->dtdv_next; 1551 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1552 1553 /* 1554 * We have now allocated a new chunk. We copy the tuple keys into the 1555 * tuple array and copy any referenced key data into the data space 1556 * following the tuple array. As we do this, we relocate dttk_value 1557 * in the final tuple to point to the key data address in the chunk. 
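 *
 * The resulting chunk layout is, roughly:
 *
 *	+------------------------------------------+
 *	| dtrace_dynvar_t header, including        |
 *	| dtt_key[0] .. dtt_key[nkeys - 1]         |
 *	+------------------------------------------+  <-- kdata
 *	| by-reference key data (ksize bytes,      |
 *	| each key rounded up to 8-byte alignment) |
 *	+------------------------------------------+  <-- dtdv_data
 *	| variable data (dsize bytes)              |
 *	+------------------------------------------+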
1558 */ 1559 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1560 dvar->dtdv_data = (void *)(kdata + ksize); 1561 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1562 1563 for (i = 0; i < nkeys; i++) { 1564 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1565 size_t kesize = key[i].dttk_size; 1566 1567 if (kesize != 0) { 1568 dtrace_bcopy( 1569 (const void *)(uintptr_t)key[i].dttk_value, 1570 (void *)kdata, kesize); 1571 dkey->dttk_value = kdata; 1572 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1573 } else { 1574 dkey->dttk_value = key[i].dttk_value; 1575 } 1576 1577 dkey->dttk_size = kesize; 1578 } 1579 1580 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1581 dvar->dtdv_hashval = hashval; 1582 dvar->dtdv_next = start; 1583 1584 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1585 return (dvar); 1586 1587 /* 1588 * The cas has failed. Either another CPU is adding an element to 1589 * this hash chain, or another CPU is deleting an element from this 1590 * hash chain. The simplest way to deal with both of these cases 1591 * (though not necessarily the most efficient) is to free our 1592 * allocated block and tail-call ourselves. Note that the free is 1593 * to the dirty list and _not_ to the free list. This is to prevent 1594 * races with allocators, above. 1595 */ 1596 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1597 1598 dtrace_membar_producer(); 1599 1600 do { 1601 free = dcpu->dtdsc_dirty; 1602 dvar->dtdv_next = free; 1603 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1604 1605 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1606 } 1607 1608 /*ARGSUSED*/ 1609 static void 1610 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1611 { 1612 if (nval < *oval) 1613 *oval = nval; 1614 } 1615 1616 /*ARGSUSED*/ 1617 static void 1618 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1619 { 1620 if (nval > *oval) 1621 *oval = nval; 1622 } 1623 1624 static void 1625 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1626 { 1627 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1628 int64_t val = (int64_t)nval; 1629 1630 if (val < 0) { 1631 for (i = 0; i < zero; i++) { 1632 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1633 quanta[i] += incr; 1634 return; 1635 } 1636 } 1637 } else { 1638 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1639 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1640 quanta[i - 1] += incr; 1641 return; 1642 } 1643 } 1644 1645 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1646 return; 1647 } 1648 1649 ASSERT(0); 1650 } 1651 1652 static void 1653 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1654 { 1655 uint64_t arg = *lquanta++; 1656 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1657 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1658 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1659 int32_t val = (int32_t)nval, level; 1660 1661 ASSERT(step != 0); 1662 ASSERT(levels != 0); 1663 1664 if (val < base) { 1665 /* 1666 * This is an underflow. 1667 */ 1668 lquanta[0] += incr; 1669 return; 1670 } 1671 1672 level = (val - base) / step; 1673 1674 if (level < levels) { 1675 lquanta[level + 1] += incr; 1676 return; 1677 } 1678 1679 /* 1680 * This is an overflow. 
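 *
 * As a worked example: with base = 0, step = 10 and levels = 5, the buckets
 * after the encoded argument are (< 0), [0, 10), [10, 20), [20, 30),
 * [30, 40), [40, 50) and (>= 50).  A value of 23 yields level = 2 and lands
 * in lquanta[3]; a value of -5 lands in the underflow bucket lquanta[0]; a
 * value of 70 yields level = 7 >= levels and falls through to the overflow
 * bucket lquanta[levels + 1] below.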
1681 */ 1682 lquanta[levels + 1] += incr; 1683 } 1684 1685 /*ARGSUSED*/ 1686 static void 1687 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1688 { 1689 data[0]++; 1690 data[1] += nval; 1691 } 1692 1693 /*ARGSUSED*/ 1694 static void 1695 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1696 { 1697 *oval = *oval + 1; 1698 } 1699 1700 /*ARGSUSED*/ 1701 static void 1702 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1703 { 1704 *oval += nval; 1705 } 1706 1707 /* 1708 * Aggregate given the tuple in the principal data buffer, and the aggregating 1709 * action denoted by the specified dtrace_aggregation_t. The aggregation 1710 * buffer is specified as the buf parameter. This routine does not return 1711 * failure; if there is no space in the aggregation buffer, the data will be 1712 * dropped, and a corresponding counter incremented. 1713 */ 1714 static void 1715 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 1716 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 1717 { 1718 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 1719 uint32_t i, ndx, size, fsize; 1720 uint32_t align = sizeof (uint64_t) - 1; 1721 dtrace_aggbuffer_t *agb; 1722 dtrace_aggkey_t *key; 1723 uint32_t hashval = 0, limit, isstr; 1724 caddr_t tomax, data, kdata; 1725 dtrace_actkind_t action; 1726 dtrace_action_t *act; 1727 uintptr_t offs; 1728 1729 if (buf == NULL) 1730 return; 1731 1732 if (!agg->dtag_hasarg) { 1733 /* 1734 * Currently, only quantize() and lquantize() take additional 1735 * arguments, and they have the same semantics: an increment 1736 * value that defaults to 1 when not present. If additional 1737 * aggregating actions take arguments, the setting of the 1738 * default argument value will presumably have to become more 1739 * sophisticated... 1740 */ 1741 arg = 1; 1742 } 1743 1744 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 1745 size = rec->dtrd_offset - agg->dtag_base; 1746 fsize = size + rec->dtrd_size; 1747 1748 ASSERT(dbuf->dtb_tomax != NULL); 1749 data = dbuf->dtb_tomax + offset + agg->dtag_base; 1750 1751 if ((tomax = buf->dtb_tomax) == NULL) { 1752 dtrace_buffer_drop(buf); 1753 return; 1754 } 1755 1756 /* 1757 * The metastructure is always at the bottom of the buffer. 1758 */ 1759 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 1760 sizeof (dtrace_aggbuffer_t)); 1761 1762 if (buf->dtb_offset == 0) { 1763 /* 1764 * We just kludge up approximately 1/8th of the size to be 1765 * buckets. If this guess ends up being routinely 1766 * off-the-mark, we may need to dynamically readjust this 1767 * based on past performance. 1768 */ 1769 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 1770 1771 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 1772 (uintptr_t)tomax || hashsize == 0) { 1773 /* 1774 * We've been given a ludicrously small buffer; 1775 * increment our drop count and leave. 1776 */ 1777 dtrace_buffer_drop(buf); 1778 return; 1779 } 1780 1781 /* 1782 * And now, a pathetic attempt to try to get a an odd (or 1783 * perchance, a prime) hash size for better hash distribution. 
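 *
 * To summarize the geometry being set up here (a sketch, not to scale):
 * the dtrace_aggbuffer_t metastructure sits at the very top of the buffer,
 * the hash bucket array sits immediately below it, dtrace_aggkey_t
 * structures are carved downward from dtagb_free, and the key/value data
 * records grow upward from dtb_offset at the bottom.  When the two regions
 * would meet, the enabling takes a drop:
 *
 *	tomax                                             tomax + dtb_size
 *	| records ->   ...free...   <- aggkeys | hash buckets | aggbuffer |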
1784 */ 1785 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 1786 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 1787 1788 agb->dtagb_hashsize = hashsize; 1789 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 1790 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 1791 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 1792 1793 for (i = 0; i < agb->dtagb_hashsize; i++) 1794 agb->dtagb_hash[i] = NULL; 1795 } 1796 1797 ASSERT(agg->dtag_first != NULL); 1798 ASSERT(agg->dtag_first->dta_intuple); 1799 1800 /* 1801 * Calculate the hash value based on the key. Note that we _don't_ 1802 * include the aggid in the hashing (but we will store it as part of 1803 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 1804 * algorithm: a simple, quick algorithm that has no known funnels, and 1805 * gets good distribution in practice. The efficacy of the hashing 1806 * algorithm (and a comparison with other algorithms) may be found by 1807 * running the ::dtrace_aggstat MDB dcmd. 1808 */ 1809 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1810 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1811 limit = i + act->dta_rec.dtrd_size; 1812 ASSERT(limit <= size); 1813 isstr = DTRACEACT_ISSTRING(act); 1814 1815 for (; i < limit; i++) { 1816 hashval += data[i]; 1817 hashval += (hashval << 10); 1818 hashval ^= (hashval >> 6); 1819 1820 if (isstr && data[i] == '\0') 1821 break; 1822 } 1823 } 1824 1825 hashval += (hashval << 3); 1826 hashval ^= (hashval >> 11); 1827 hashval += (hashval << 15); 1828 1829 /* 1830 * Yes, the divide here is expensive -- but it's generally the least 1831 * of the performance issues given the amount of data that we iterate 1832 * over to compute hash values, compare data, etc. 1833 */ 1834 ndx = hashval % agb->dtagb_hashsize; 1835 1836 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 1837 ASSERT((caddr_t)key >= tomax); 1838 ASSERT((caddr_t)key < tomax + buf->dtb_size); 1839 1840 if (hashval != key->dtak_hashval || key->dtak_size != size) 1841 continue; 1842 1843 kdata = key->dtak_data; 1844 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 1845 1846 for (act = agg->dtag_first; act->dta_intuple; 1847 act = act->dta_next) { 1848 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1849 limit = i + act->dta_rec.dtrd_size; 1850 ASSERT(limit <= size); 1851 isstr = DTRACEACT_ISSTRING(act); 1852 1853 for (; i < limit; i++) { 1854 if (kdata[i] != data[i]) 1855 goto next; 1856 1857 if (isstr && data[i] == '\0') 1858 break; 1859 } 1860 } 1861 1862 if (action != key->dtak_action) { 1863 /* 1864 * We are aggregating on the same value in the same 1865 * aggregation with two different aggregating actions. 1866 * (This should have been picked up in the compiler, 1867 * so we may be dealing with errant or devious DIF.) 1868 * This is an error condition; we indicate as much, 1869 * and return. 1870 */ 1871 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 1872 return; 1873 } 1874 1875 /* 1876 * This is a hit: we need to apply the aggregator to 1877 * the value at this key. 1878 */ 1879 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 1880 return; 1881 next: 1882 continue; 1883 } 1884 1885 /* 1886 * We didn't find it. We need to allocate some zero-filled space, 1887 * link it into the hash table appropriately, and apply the aggregator 1888 * to the (zero-filled) value. 
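 *
 * The space comes from the gap between the data records, which grow upward
 * from tomax via dtb_offset, and the dtrace_aggkey_t structures, which are
 * carved downward starting from dtagb_free (initialized above to the base
 * of the hash table, itself just below the metastructure). Roughly:
 *
 *	+------------------+--- ... ---+-----------+------------+-----------+
 *	| records (grow ->)|   free    | (<- keys) | hash table | aggbuffer |
 *	+------------------+--- ... ---+-----------+------------+-----------+
 *	tomax                                                tomax + dtb_size
 *
 * The check below refuses the allocation (and counts a drop) if the two
 * regions would meet.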
1889 */ 1890 offs = buf->dtb_offset; 1891 while (offs & (align - 1)) 1892 offs += sizeof (uint32_t); 1893 1894 /* 1895 * If we don't have enough room to both allocate a new key _and_ 1896 * its associated data, increment the drop count and return. 1897 */ 1898 if ((uintptr_t)tomax + offs + fsize > 1899 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 1900 dtrace_buffer_drop(buf); 1901 return; 1902 } 1903 1904 /*CONSTCOND*/ 1905 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 1906 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 1907 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 1908 1909 key->dtak_data = kdata = tomax + offs; 1910 buf->dtb_offset = offs + fsize; 1911 1912 /* 1913 * Now copy the data across. 1914 */ 1915 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 1916 1917 for (i = sizeof (dtrace_aggid_t); i < size; i++) 1918 kdata[i] = data[i]; 1919 1920 /* 1921 * Because strings are not zeroed out by default, we need to iterate 1922 * looking for actions that store strings, and we need to explicitly 1923 * pad these strings out with zeroes. 1924 */ 1925 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1926 int nul; 1927 1928 if (!DTRACEACT_ISSTRING(act)) 1929 continue; 1930 1931 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1932 limit = i + act->dta_rec.dtrd_size; 1933 ASSERT(limit <= size); 1934 1935 for (nul = 0; i < limit; i++) { 1936 if (nul) { 1937 kdata[i] = '\0'; 1938 continue; 1939 } 1940 1941 if (data[i] != '\0') 1942 continue; 1943 1944 nul = 1; 1945 } 1946 } 1947 1948 for (i = size; i < fsize; i++) 1949 kdata[i] = 0; 1950 1951 key->dtak_hashval = hashval; 1952 key->dtak_size = size; 1953 key->dtak_action = action; 1954 key->dtak_next = agb->dtagb_hash[ndx]; 1955 agb->dtagb_hash[ndx] = key; 1956 1957 /* 1958 * Finally, apply the aggregator. 1959 */ 1960 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 1961 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 1962 } 1963 1964 /* 1965 * Given consumer state, this routine finds a speculation in the INACTIVE 1966 * state and transitions it into the ACTIVE state. If there is no speculation 1967 * in the INACTIVE state, 0 is returned. In this case, no error counter is 1968 * incremented -- it is up to the caller to take appropriate action. 1969 */ 1970 static int 1971 dtrace_speculation(dtrace_state_t *state) 1972 { 1973 int i = 0; 1974 dtrace_speculation_state_t current; 1975 uint32_t *stat = &state->dts_speculations_unavail, count; 1976 1977 while (i < state->dts_nspeculations) { 1978 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1979 1980 current = spec->dtsp_state; 1981 1982 if (current != DTRACESPEC_INACTIVE) { 1983 if (current == DTRACESPEC_COMMITTINGMANY || 1984 current == DTRACESPEC_COMMITTING || 1985 current == DTRACESPEC_DISCARDING) 1986 stat = &state->dts_speculations_busy; 1987 i++; 1988 continue; 1989 } 1990 1991 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1992 current, DTRACESPEC_ACTIVE) == current) 1993 return (i + 1); 1994 } 1995 1996 /* 1997 * We couldn't find a speculation. If we found as much as a single 1998 * busy speculation buffer, we'll attribute this failure as "busy" 1999 * instead of "unavail". 2000 */ 2001 do { 2002 count = *stat; 2003 } while (dtrace_cas32(stat, count, count + 1) != count); 2004 2005 return (0); 2006 } 2007 2008 /* 2009 * This routine commits an active speculation. If the specified speculation 2010 * is not in a valid state to perform a commit(), this routine will silently do 2011 * nothing. 
The state of the specified speculation is transitioned according 2012 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2013 */ 2014 static void 2015 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2016 dtrace_specid_t which) 2017 { 2018 dtrace_speculation_t *spec; 2019 dtrace_buffer_t *src, *dest; 2020 uintptr_t daddr, saddr, dlimit; 2021 dtrace_speculation_state_t current, new; 2022 intptr_t offs; 2023 2024 if (which == 0) 2025 return; 2026 2027 if (which > state->dts_nspeculations) { 2028 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2029 return; 2030 } 2031 2032 spec = &state->dts_speculations[which - 1]; 2033 src = &spec->dtsp_buffer[cpu]; 2034 dest = &state->dts_buffer[cpu]; 2035 2036 do { 2037 current = spec->dtsp_state; 2038 2039 if (current == DTRACESPEC_COMMITTINGMANY) 2040 break; 2041 2042 switch (current) { 2043 case DTRACESPEC_INACTIVE: 2044 case DTRACESPEC_DISCARDING: 2045 return; 2046 2047 case DTRACESPEC_COMMITTING: 2048 /* 2049 * This is only possible if we are (a) commit()'ing 2050 * without having done a prior speculate() on this CPU 2051 * and (b) racing with another commit() on a different 2052 * CPU. There's nothing to do -- we just assert that 2053 * our offset is 0. 2054 */ 2055 ASSERT(src->dtb_offset == 0); 2056 return; 2057 2058 case DTRACESPEC_ACTIVE: 2059 new = DTRACESPEC_COMMITTING; 2060 break; 2061 2062 case DTRACESPEC_ACTIVEONE: 2063 /* 2064 * This speculation is active on one CPU. If our 2065 * buffer offset is non-zero, we know that the one CPU 2066 * must be us. Otherwise, we are committing on a 2067 * different CPU from the speculate(), and we must 2068 * rely on being asynchronously cleaned. 2069 */ 2070 if (src->dtb_offset != 0) { 2071 new = DTRACESPEC_COMMITTING; 2072 break; 2073 } 2074 /*FALLTHROUGH*/ 2075 2076 case DTRACESPEC_ACTIVEMANY: 2077 new = DTRACESPEC_COMMITTINGMANY; 2078 break; 2079 2080 default: 2081 ASSERT(0); 2082 } 2083 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2084 current, new) != current); 2085 2086 /* 2087 * We have set the state to indicate that we are committing this 2088 * speculation. Now reserve the necessary space in the destination 2089 * buffer. 2090 */ 2091 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2092 sizeof (uint64_t), state, NULL)) < 0) { 2093 dtrace_buffer_drop(dest); 2094 goto out; 2095 } 2096 2097 /* 2098 * We have the space; copy the buffer across. (Note that this is a 2099 * highly suboptimal bcopy(); in the unlikely event that this becomes 2100 * a serious performance issue, a high-performance DTrace-specific 2101 * bcopy() should obviously be invented.) 2102 */ 2103 daddr = (uintptr_t)dest->dtb_tomax + offs; 2104 dlimit = daddr + src->dtb_offset; 2105 saddr = (uintptr_t)src->dtb_tomax; 2106 2107 /* 2108 * First, the aligned portion. 2109 */ 2110 while (dlimit - daddr >= sizeof (uint64_t)) { 2111 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2112 2113 daddr += sizeof (uint64_t); 2114 saddr += sizeof (uint64_t); 2115 } 2116 2117 /* 2118 * Now any left-over bit... 2119 */ 2120 while (dlimit - daddr) 2121 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2122 2123 /* 2124 * Finally, commit the reserved space in the destination buffer. 2125 */ 2126 dest->dtb_offset = offs + src->dtb_offset; 2127 2128 out: 2129 /* 2130 * If we're lucky enough to be the only active CPU on this speculation 2131 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
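 *
 * (In that case no other CPU can be racing us out of the COMMITTING state,
 * so the single compare-and-swap below must succeed; the ASSERT on its
 * return value records that invariant.)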
2132 */ 2133 if (current == DTRACESPEC_ACTIVE || 2134 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2135 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2136 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2137 2138 ASSERT(rval == DTRACESPEC_COMMITTING); 2139 } 2140 2141 src->dtb_offset = 0; 2142 src->dtb_xamot_drops += src->dtb_drops; 2143 src->dtb_drops = 0; 2144 } 2145 2146 /* 2147 * This routine discards an active speculation. If the specified speculation 2148 * is not in a valid state to perform a discard(), this routine will silently 2149 * do nothing. The state of the specified speculation is transitioned 2150 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2151 */ 2152 static void 2153 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2154 dtrace_specid_t which) 2155 { 2156 dtrace_speculation_t *spec; 2157 dtrace_speculation_state_t current, new; 2158 dtrace_buffer_t *buf; 2159 2160 if (which == 0) 2161 return; 2162 2163 if (which > state->dts_nspeculations) { 2164 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2165 return; 2166 } 2167 2168 spec = &state->dts_speculations[which - 1]; 2169 buf = &spec->dtsp_buffer[cpu]; 2170 2171 do { 2172 current = spec->dtsp_state; 2173 2174 switch (current) { 2175 case DTRACESPEC_INACTIVE: 2176 case DTRACESPEC_COMMITTINGMANY: 2177 case DTRACESPEC_COMMITTING: 2178 case DTRACESPEC_DISCARDING: 2179 return; 2180 2181 case DTRACESPEC_ACTIVE: 2182 case DTRACESPEC_ACTIVEMANY: 2183 new = DTRACESPEC_DISCARDING; 2184 break; 2185 2186 case DTRACESPEC_ACTIVEONE: 2187 if (buf->dtb_offset != 0) { 2188 new = DTRACESPEC_INACTIVE; 2189 } else { 2190 new = DTRACESPEC_DISCARDING; 2191 } 2192 break; 2193 2194 default: 2195 ASSERT(0); 2196 } 2197 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2198 current, new) != current); 2199 2200 buf->dtb_offset = 0; 2201 buf->dtb_drops = 0; 2202 } 2203 2204 /* 2205 * Note: not called from probe context. This function is called 2206 * asynchronously from cross call context to clean any speculations that are 2207 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2208 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2209 * speculation. 2210 */ 2211 static void 2212 dtrace_speculation_clean_here(dtrace_state_t *state) 2213 { 2214 dtrace_icookie_t cookie; 2215 processorid_t cpu = CPU->cpu_id; 2216 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2217 dtrace_specid_t i; 2218 2219 cookie = dtrace_interrupt_disable(); 2220 2221 if (dest->dtb_tomax == NULL) { 2222 dtrace_interrupt_enable(cookie); 2223 return; 2224 } 2225 2226 for (i = 0; i < state->dts_nspeculations; i++) { 2227 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2228 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2229 2230 if (src->dtb_tomax == NULL) 2231 continue; 2232 2233 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2234 src->dtb_offset = 0; 2235 continue; 2236 } 2237 2238 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2239 continue; 2240 2241 if (src->dtb_offset == 0) 2242 continue; 2243 2244 dtrace_speculation_commit(state, cpu, i + 1); 2245 } 2246 2247 dtrace_interrupt_enable(cookie); 2248 } 2249 2250 /* 2251 * Note: not called from probe context. This function is called 2252 * asynchronously (and at a regular interval) to clean any speculations that 2253 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2254 * is work to be done, it cross calls all CPUs to perform that work; 2255 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2256 * INACTIVE state until they have been cleaned by all CPUs. 2257 */ 2258 static void 2259 dtrace_speculation_clean(dtrace_state_t *state) 2260 { 2261 int work = 0, rv; 2262 dtrace_specid_t i; 2263 2264 for (i = 0; i < state->dts_nspeculations; i++) { 2265 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2266 2267 ASSERT(!spec->dtsp_cleaning); 2268 2269 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2270 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2271 continue; 2272 2273 work++; 2274 spec->dtsp_cleaning = 1; 2275 } 2276 2277 if (!work) 2278 return; 2279 2280 dtrace_xcall(DTRACE_CPUALL, 2281 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2282 2283 /* 2284 * We now know that all CPUs have committed or discarded their 2285 * speculation buffers, as appropriate. We can now set the state 2286 * to inactive. 2287 */ 2288 for (i = 0; i < state->dts_nspeculations; i++) { 2289 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2290 dtrace_speculation_state_t current, new; 2291 2292 if (!spec->dtsp_cleaning) 2293 continue; 2294 2295 current = spec->dtsp_state; 2296 ASSERT(current == DTRACESPEC_DISCARDING || 2297 current == DTRACESPEC_COMMITTINGMANY); 2298 2299 new = DTRACESPEC_INACTIVE; 2300 2301 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2302 ASSERT(rv == current); 2303 spec->dtsp_cleaning = 0; 2304 } 2305 } 2306 2307 /* 2308 * Called as part of a speculate() to get the speculative buffer associated 2309 * with a given speculation. Returns NULL if the specified speculation is not 2310 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2311 * the active CPU is not the specified CPU -- the speculation will be 2312 * atomically transitioned into the ACTIVEMANY state. 2313 */ 2314 static dtrace_buffer_t * 2315 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2316 dtrace_specid_t which) 2317 { 2318 dtrace_speculation_t *spec; 2319 dtrace_speculation_state_t current, new; 2320 dtrace_buffer_t *buf; 2321 2322 if (which == 0) 2323 return (NULL); 2324 2325 if (which > state->dts_nspeculations) { 2326 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2327 return (NULL); 2328 } 2329 2330 spec = &state->dts_speculations[which - 1]; 2331 buf = &spec->dtsp_buffer[cpuid]; 2332 2333 do { 2334 current = spec->dtsp_state; 2335 2336 switch (current) { 2337 case DTRACESPEC_INACTIVE: 2338 case DTRACESPEC_COMMITTINGMANY: 2339 case DTRACESPEC_DISCARDING: 2340 return (NULL); 2341 2342 case DTRACESPEC_COMMITTING: 2343 ASSERT(buf->dtb_offset == 0); 2344 return (NULL); 2345 2346 case DTRACESPEC_ACTIVEONE: 2347 /* 2348 * This speculation is currently active on one CPU. 2349 * Check the offset in the buffer; if it's non-zero, 2350 * that CPU must be us (and we leave the state alone). 2351 * If it's zero, assume that we're starting on a new 2352 * CPU -- and change the state to indicate that the 2353 * speculation is active on more than one CPU.
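 *
 * (For example: if the speculate() that made this speculation ACTIVEONE
 * ran on CPU 4 and a clause on CPU 9 now speculates into the same
 * speculation, CPU 9's buffer offset is still zero, so the transition
 * below moves the state to ACTIVEMANY -- and a later commit() or
 * discard() will have to be cleaned asynchronously on every CPU rather
 * than completing entirely in probe context.)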
2354 */ 2355 if (buf->dtb_offset != 0) 2356 return (buf); 2357 2358 new = DTRACESPEC_ACTIVEMANY; 2359 break; 2360 2361 case DTRACESPEC_ACTIVEMANY: 2362 return (buf); 2363 2364 case DTRACESPEC_ACTIVE: 2365 new = DTRACESPEC_ACTIVEONE; 2366 break; 2367 2368 default: 2369 ASSERT(0); 2370 } 2371 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2372 current, new) != current); 2373 2374 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2375 return (buf); 2376 } 2377 2378 /* 2379 * Return a string. In the event that the user lacks the privilege to access 2380 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2381 * don't fail access checking. 2382 * 2383 * dtrace_dif_variable() uses this routine as a helper for various 2384 * builtin values such as 'execname' and 'probefunc.' 2385 */ 2386 uintptr_t 2387 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2388 dtrace_mstate_t *mstate) 2389 { 2390 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2391 uintptr_t ret; 2392 size_t strsz; 2393 2394 /* 2395 * The easy case: this probe is allowed to read all of memory, so 2396 * we can just return this as a vanilla pointer. 2397 */ 2398 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2399 return (addr); 2400 2401 /* 2402 * This is the tougher case: we copy the string in question from 2403 * kernel memory into scratch memory and return it that way: this 2404 * ensures that we won't trip up when access checking tests the 2405 * BYREF return value. 2406 */ 2407 strsz = dtrace_strlen((char *)addr, size) + 1; 2408 2409 if (mstate->dtms_scratch_ptr + strsz > 2410 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2411 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2412 return (NULL); 2413 } 2414 2415 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2416 strsz); 2417 ret = mstate->dtms_scratch_ptr; 2418 mstate->dtms_scratch_ptr += strsz; 2419 return (ret); 2420 } 2421 2422 /* 2423 * This function implements the DIF emulator's variable lookups. The emulator 2424 * passes a reserved variable identifier and optional built-in array index. 2425 */ 2426 static uint64_t 2427 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2428 uint64_t ndx) 2429 { 2430 /* 2431 * If we're accessing one of the uncached arguments, we'll turn this 2432 * into a reference in the args array. 2433 */ 2434 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2435 ndx = v - DIF_VAR_ARG0; 2436 v = DIF_VAR_ARGS; 2437 } 2438 2439 switch (v) { 2440 case DIF_VAR_ARGS: 2441 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2442 if (ndx >= sizeof (mstate->dtms_arg) / 2443 sizeof (mstate->dtms_arg[0])) { 2444 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2445 dtrace_provider_t *pv; 2446 uint64_t val; 2447 2448 pv = mstate->dtms_probe->dtpr_provider; 2449 if (pv->dtpv_pops.dtps_getargval != NULL) 2450 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2451 mstate->dtms_probe->dtpr_id, 2452 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2453 else 2454 val = dtrace_getarg(ndx, aframes); 2455 2456 /* 2457 * This is regrettably required to keep the compiler 2458 * from tail-optimizing the call to dtrace_getarg(). 2459 * The condition always evaluates to true, but the 2460 * compiler has no way of figuring that out a priori. 2461 * (None of this would be necessary if the compiler 2462 * could be relied upon to _always_ tail-optimize 2463 * the call to dtrace_getarg() -- but it can't.) 
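 *
 * (Concretely: were the call tail-optimized, this function's own frame
 * would not be on the stack when dtrace_getarg() walks it, and the
 * artificial frame count computed above would be off. Because the
 * optimization cannot be counted on to happen every time, the vacuous
 * test below pins down the unoptimized case so that the count is always
 * right.)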
2464 */ 2465 if (mstate->dtms_probe != NULL) 2466 return (val); 2467 2468 ASSERT(0); 2469 } 2470 2471 return (mstate->dtms_arg[ndx]); 2472 2473 case DIF_VAR_UREGS: { 2474 klwp_t *lwp; 2475 2476 if (!dtrace_priv_proc(state)) 2477 return (0); 2478 2479 if ((lwp = curthread->t_lwp) == NULL) { 2480 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2481 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2482 return (0); 2483 } 2484 2485 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2486 } 2487 2488 case DIF_VAR_CURTHREAD: 2489 if (!dtrace_priv_kernel(state)) 2490 return (0); 2491 return ((uint64_t)(uintptr_t)curthread); 2492 2493 case DIF_VAR_TIMESTAMP: 2494 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2495 mstate->dtms_timestamp = dtrace_gethrtime(); 2496 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2497 } 2498 return (mstate->dtms_timestamp); 2499 2500 case DIF_VAR_VTIMESTAMP: 2501 ASSERT(dtrace_vtime_references != 0); 2502 return (curthread->t_dtrace_vtime); 2503 2504 case DIF_VAR_WALLTIMESTAMP: 2505 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2506 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2507 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2508 } 2509 return (mstate->dtms_walltimestamp); 2510 2511 case DIF_VAR_IPL: 2512 if (!dtrace_priv_kernel(state)) 2513 return (0); 2514 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2515 mstate->dtms_ipl = dtrace_getipl(); 2516 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2517 } 2518 return (mstate->dtms_ipl); 2519 2520 case DIF_VAR_EPID: 2521 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2522 return (mstate->dtms_epid); 2523 2524 case DIF_VAR_ID: 2525 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2526 return (mstate->dtms_probe->dtpr_id); 2527 2528 case DIF_VAR_STACKDEPTH: 2529 if (!dtrace_priv_kernel(state)) 2530 return (0); 2531 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2532 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2533 2534 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2535 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2536 } 2537 return (mstate->dtms_stackdepth); 2538 2539 case DIF_VAR_USTACKDEPTH: 2540 if (!dtrace_priv_proc(state)) 2541 return (0); 2542 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2543 /* 2544 * See comment in DIF_VAR_PID. 2545 */ 2546 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2547 CPU_ON_INTR(CPU)) { 2548 mstate->dtms_ustackdepth = 0; 2549 } else { 2550 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2551 mstate->dtms_ustackdepth = 2552 dtrace_getustackdepth(); 2553 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2554 } 2555 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2556 } 2557 return (mstate->dtms_ustackdepth); 2558 2559 case DIF_VAR_CALLER: 2560 if (!dtrace_priv_kernel(state)) 2561 return (0); 2562 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2563 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2564 2565 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2566 /* 2567 * If this is an unanchored probe, we are 2568 * required to go through the slow path: 2569 * dtrace_caller() only guarantees correct 2570 * results for anchored probes. 2571 */ 2572 pc_t caller[2]; 2573 2574 dtrace_getpcstack(caller, 2, aframes, 2575 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2576 mstate->dtms_caller = caller[1]; 2577 } else if ((mstate->dtms_caller = 2578 dtrace_caller(aframes)) == -1) { 2579 /* 2580 * We have failed to do this the quick way; 2581 * we must resort to the slower approach of 2582 * calling dtrace_getpcstack(). 
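 *
 * (dtrace_caller() returns -1 when it cannot determine the caller the
 * quick way; the one-entry call to dtrace_getpcstack() below recovers
 * it the slow way.)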
2583 */ 2584 pc_t caller; 2585 2586 dtrace_getpcstack(&caller, 1, aframes, NULL); 2587 mstate->dtms_caller = caller; 2588 } 2589 2590 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2591 } 2592 return (mstate->dtms_caller); 2593 2594 case DIF_VAR_UCALLER: 2595 if (!dtrace_priv_proc(state)) 2596 return (0); 2597 2598 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2599 uint64_t ustack[3]; 2600 2601 /* 2602 * dtrace_getupcstack() fills in the first uint64_t 2603 * with the current PID. The second uint64_t will 2604 * be the program counter at user-level. The third 2605 * uint64_t will contain the caller, which is what 2606 * we're after. 2607 */ 2608 ustack[2] = NULL; 2609 dtrace_getupcstack(ustack, 3); 2610 mstate->dtms_ucaller = ustack[2]; 2611 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2612 } 2613 2614 return (mstate->dtms_ucaller); 2615 2616 case DIF_VAR_PROBEPROV: 2617 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2618 return (dtrace_dif_varstr( 2619 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2620 state, mstate)); 2621 2622 case DIF_VAR_PROBEMOD: 2623 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2624 return (dtrace_dif_varstr( 2625 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2626 state, mstate)); 2627 2628 case DIF_VAR_PROBEFUNC: 2629 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2630 return (dtrace_dif_varstr( 2631 (uintptr_t)mstate->dtms_probe->dtpr_func, 2632 state, mstate)); 2633 2634 case DIF_VAR_PROBENAME: 2635 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2636 return (dtrace_dif_varstr( 2637 (uintptr_t)mstate->dtms_probe->dtpr_name, 2638 state, mstate)); 2639 2640 case DIF_VAR_PID: 2641 if (!dtrace_priv_proc(state)) 2642 return (0); 2643 2644 /* 2645 * Note that we are assuming that an unanchored probe is 2646 * always due to a high-level interrupt. (And we're assuming 2647 * that there is only a single high level interrupt.) 2648 */ 2649 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2650 return (pid0.pid_id); 2651 2652 /* 2653 * It is always safe to dereference one's own t_procp pointer: 2654 * it always points to a valid, allocated proc structure. 2655 * Further, it is always safe to dereference the p_pidp member 2656 * of one's own proc structure. (These are truisms because 2657 * threads and processes don't clean up their own state -- 2658 * they leave that task to whomever reaps them.) 2659 */ 2660 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2661 2662 case DIF_VAR_PPID: 2663 if (!dtrace_priv_proc(state)) 2664 return (0); 2665 2666 /* 2667 * See comment in DIF_VAR_PID. 2668 */ 2669 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2670 return (pid0.pid_id); 2671 2672 /* 2673 * It is always safe to dereference one's own t_procp pointer: 2674 * it always points to a valid, allocated proc structure. 2675 * (This is true because threads don't clean up their own 2676 * state -- they leave that task to whomever reaps them.) 2677 */ 2678 return ((uint64_t)curthread->t_procp->p_ppid); 2679 2680 case DIF_VAR_TID: 2681 /* 2682 * See comment in DIF_VAR_PID. 2683 */ 2684 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2685 return (0); 2686 2687 return ((uint64_t)curthread->t_tid); 2688 2689 case DIF_VAR_EXECNAME: 2690 if (!dtrace_priv_proc(state)) 2691 return (0); 2692 2693 /* 2694 * See comment in DIF_VAR_PID.
2695 */ 2696 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2697 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2698 2699 /* 2700 * It is always safe to dereference one's own t_procp pointer: 2701 * it always points to a valid, allocated proc structure. 2702 * (This is true because threads don't clean up their own 2703 * state -- they leave that task to whomever reaps them.) 2704 */ 2705 return (dtrace_dif_varstr( 2706 (uintptr_t)curthread->t_procp->p_user.u_comm, 2707 state, mstate)); 2708 2709 case DIF_VAR_ZONENAME: 2710 if (!dtrace_priv_proc(state)) 2711 return (0); 2712 2713 /* 2714 * See comment in DIF_VAR_PID. 2715 */ 2716 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2717 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2718 2719 /* 2720 * It is always safe to dereference one's own t_procp pointer: 2721 * it always points to a valid, allocated proc structure. 2722 * (This is true because threads don't clean up their own 2723 * state -- they leave that task to whomever reaps them.) 2724 */ 2725 return (dtrace_dif_varstr( 2726 (uintptr_t)curthread->t_procp->p_zone->zone_name, 2727 state, mstate)); 2728 2729 case DIF_VAR_UID: 2730 if (!dtrace_priv_proc(state)) 2731 return (0); 2732 2733 /* 2734 * See comment in DIF_VAR_PID. 2735 */ 2736 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2737 return ((uint64_t)p0.p_cred->cr_uid); 2738 2739 /* 2740 * It is always safe to dereference one's own t_procp pointer: 2741 * it always points to a valid, allocated proc structure. 2742 * (This is true because threads don't clean up their own 2743 * state -- they leave that task to whomever reaps them.) 2744 * 2745 * Additionally, it is safe to dereference one's own process 2746 * credential, since this is never NULL after process birth. 2747 */ 2748 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 2749 2750 case DIF_VAR_GID: 2751 if (!dtrace_priv_proc(state)) 2752 return (0); 2753 2754 /* 2755 * See comment in DIF_VAR_PID. 2756 */ 2757 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2758 return ((uint64_t)p0.p_cred->cr_gid); 2759 2760 /* 2761 * It is always safe to dereference one's own t_procp pointer: 2762 * it always points to a valid, allocated proc structure. 2763 * (This is true because threads don't clean up their own 2764 * state -- they leave that task to whomever reaps them.) 2765 * 2766 * Additionally, it is safe to dereference one's own process 2767 * credential, since this is never NULL after process birth. 2768 */ 2769 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 2770 2771 case DIF_VAR_ERRNO: { 2772 klwp_t *lwp; 2773 if (!dtrace_priv_proc(state)) 2774 return (0); 2775 2776 /* 2777 * See comment in DIF_VAR_PID. 2778 */ 2779 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2780 return (0); 2781 2782 /* 2783 * It is always safe to dereference one's own t_lwp pointer in 2784 * the event that this pointer is non-NULL. (This is true 2785 * because threads and lwps don't clean up their own state -- 2786 * they leave that task to whomever reaps them.) 2787 */ 2788 if ((lwp = curthread->t_lwp) == NULL) 2789 return (0); 2790 2791 return ((uint64_t)lwp->lwp_errno); 2792 } 2793 default: 2794 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2795 return (0); 2796 } 2797 } 2798 2799 /* 2800 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 2801 * Notice that we don't bother validating the proper number of arguments or 2802 * their types in the tuple stack. 
This isn't needed because all argument 2803 * interpretation is safe because of our load safety -- the worst that can 2804 * happen is that a bogus program can obtain bogus results. 2805 */ 2806 static void 2807 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2808 dtrace_key_t *tupregs, int nargs, 2809 dtrace_mstate_t *mstate, dtrace_state_t *state) 2810 { 2811 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2812 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2813 dtrace_vstate_t *vstate = &state->dts_vstate; 2814 2815 union { 2816 mutex_impl_t mi; 2817 uint64_t mx; 2818 } m; 2819 2820 union { 2821 krwlock_t ri; 2822 uintptr_t rw; 2823 } r; 2824 2825 switch (subr) { 2826 case DIF_SUBR_RAND: 2827 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2828 break; 2829 2830 case DIF_SUBR_MUTEX_OWNED: 2831 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2832 mstate, vstate)) { 2833 regs[rd] = NULL; 2834 break; 2835 } 2836 2837 m.mx = dtrace_load64(tupregs[0].dttk_value); 2838 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2839 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2840 else 2841 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2842 break; 2843 2844 case DIF_SUBR_MUTEX_OWNER: 2845 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2846 mstate, vstate)) { 2847 regs[rd] = NULL; 2848 break; 2849 } 2850 2851 m.mx = dtrace_load64(tupregs[0].dttk_value); 2852 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2853 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2854 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2855 else 2856 regs[rd] = 0; 2857 break; 2858 2859 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2860 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2861 mstate, vstate)) { 2862 regs[rd] = NULL; 2863 break; 2864 } 2865 2866 m.mx = dtrace_load64(tupregs[0].dttk_value); 2867 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2868 break; 2869 2870 case DIF_SUBR_MUTEX_TYPE_SPIN: 2871 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2872 mstate, vstate)) { 2873 regs[rd] = NULL; 2874 break; 2875 } 2876 2877 m.mx = dtrace_load64(tupregs[0].dttk_value); 2878 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2879 break; 2880 2881 case DIF_SUBR_RW_READ_HELD: { 2882 uintptr_t tmp; 2883 2884 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 2885 mstate, vstate)) { 2886 regs[rd] = NULL; 2887 break; 2888 } 2889 2890 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2891 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2892 break; 2893 } 2894 2895 case DIF_SUBR_RW_WRITE_HELD: 2896 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2897 mstate, vstate)) { 2898 regs[rd] = NULL; 2899 break; 2900 } 2901 2902 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2903 regs[rd] = _RW_WRITE_HELD(&r.ri); 2904 break; 2905 2906 case DIF_SUBR_RW_ISWRITER: 2907 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2908 mstate, vstate)) { 2909 regs[rd] = NULL; 2910 break; 2911 } 2912 2913 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2914 regs[rd] = _RW_ISWRITER(&r.ri); 2915 break; 2916 2917 case DIF_SUBR_BCOPY: { 2918 /* 2919 * We need to be sure that the destination is in the scratch 2920 * region -- no other region is allowed. 
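 *
 * (In practice the destination will typically have been handed out by
 * alloca() -- the very next case below -- which allocates from this
 * consumer's scratch region; any other destination fails the
 * dtrace_inscratch() check below and the access is flagged with
 * CPU_DTRACE_BADADDR.)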
2921 */ 2922 uintptr_t src = tupregs[0].dttk_value; 2923 uintptr_t dest = tupregs[1].dttk_value; 2924 size_t size = tupregs[2].dttk_value; 2925 2926 if (!dtrace_inscratch(dest, size, mstate)) { 2927 *flags |= CPU_DTRACE_BADADDR; 2928 *illval = regs[rd]; 2929 break; 2930 } 2931 2932 if (!dtrace_canload(src, size, mstate, vstate)) { 2933 regs[rd] = NULL; 2934 break; 2935 } 2936 2937 dtrace_bcopy((void *)src, (void *)dest, size); 2938 break; 2939 } 2940 2941 case DIF_SUBR_ALLOCA: 2942 case DIF_SUBR_COPYIN: { 2943 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2944 uint64_t size = 2945 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2946 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2947 2948 /* 2949 * This action doesn't require any credential checks since 2950 * probes will not activate in user contexts to which the 2951 * enabling user does not have permissions. 2952 */ 2953 2954 /* 2955 * Rounding up the user allocation size could have overflowed 2956 * a large, bogus allocation (like -1ULL) to 0. 2957 */ 2958 if (scratch_size < size || 2959 !DTRACE_INSCRATCH(mstate, scratch_size)) { 2960 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2961 regs[rd] = NULL; 2962 break; 2963 } 2964 2965 if (subr == DIF_SUBR_COPYIN) { 2966 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2967 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 2968 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2969 } 2970 2971 mstate->dtms_scratch_ptr += scratch_size; 2972 regs[rd] = dest; 2973 break; 2974 } 2975 2976 case DIF_SUBR_COPYINTO: { 2977 uint64_t size = tupregs[1].dttk_value; 2978 uintptr_t dest = tupregs[2].dttk_value; 2979 2980 /* 2981 * This action doesn't require any credential checks since 2982 * probes will not activate in user contexts to which the 2983 * enabling user does not have permissions. 2984 */ 2985 if (!dtrace_inscratch(dest, size, mstate)) { 2986 *flags |= CPU_DTRACE_BADADDR; 2987 *illval = regs[rd]; 2988 break; 2989 } 2990 2991 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2992 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 2993 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2994 break; 2995 } 2996 2997 case DIF_SUBR_COPYINSTR: { 2998 uintptr_t dest = mstate->dtms_scratch_ptr; 2999 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3000 3001 if (nargs > 1 && tupregs[1].dttk_value < size) 3002 size = tupregs[1].dttk_value + 1; 3003 3004 /* 3005 * This action doesn't require any credential checks since 3006 * probes will not activate in user contexts to which the 3007 * enabling user does not have permissions. 
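 *
 * (The copy below is bounded by the strsize option -- or by the optional
 * second argument plus its terminating NUL, when that is smaller -- and
 * the result is always explicitly NUL-terminated in scratch.)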
3008 */ 3009 if (!DTRACE_INSCRATCH(mstate, size)) { 3010 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3011 regs[rd] = NULL; 3012 break; 3013 } 3014 3015 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3016 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3017 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3018 3019 ((char *)dest)[size - 1] = '\0'; 3020 mstate->dtms_scratch_ptr += size; 3021 regs[rd] = dest; 3022 break; 3023 } 3024 3025 case DIF_SUBR_MSGSIZE: 3026 case DIF_SUBR_MSGDSIZE: { 3027 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3028 uintptr_t wptr, rptr; 3029 size_t count = 0; 3030 int cont = 0; 3031 3032 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3033 3034 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3035 vstate)) { 3036 regs[rd] = NULL; 3037 break; 3038 } 3039 3040 wptr = dtrace_loadptr(baddr + 3041 offsetof(mblk_t, b_wptr)); 3042 3043 rptr = dtrace_loadptr(baddr + 3044 offsetof(mblk_t, b_rptr)); 3045 3046 if (wptr < rptr) { 3047 *flags |= CPU_DTRACE_BADADDR; 3048 *illval = tupregs[0].dttk_value; 3049 break; 3050 } 3051 3052 daddr = dtrace_loadptr(baddr + 3053 offsetof(mblk_t, b_datap)); 3054 3055 baddr = dtrace_loadptr(baddr + 3056 offsetof(mblk_t, b_cont)); 3057 3058 /* 3059 * We want to prevent against denial-of-service here, 3060 * so we're only going to search the list for 3061 * dtrace_msgdsize_max mblks. 3062 */ 3063 if (cont++ > dtrace_msgdsize_max) { 3064 *flags |= CPU_DTRACE_ILLOP; 3065 break; 3066 } 3067 3068 if (subr == DIF_SUBR_MSGDSIZE) { 3069 if (dtrace_load8(daddr + 3070 offsetof(dblk_t, db_type)) != M_DATA) 3071 continue; 3072 } 3073 3074 count += wptr - rptr; 3075 } 3076 3077 if (!(*flags & CPU_DTRACE_FAULT)) 3078 regs[rd] = count; 3079 3080 break; 3081 } 3082 3083 case DIF_SUBR_PROGENYOF: { 3084 pid_t pid = tupregs[0].dttk_value; 3085 proc_t *p; 3086 int rval = 0; 3087 3088 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3089 3090 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3091 if (p->p_pidp->pid_id == pid) { 3092 rval = 1; 3093 break; 3094 } 3095 } 3096 3097 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3098 3099 regs[rd] = rval; 3100 break; 3101 } 3102 3103 case DIF_SUBR_SPECULATION: 3104 regs[rd] = dtrace_speculation(state); 3105 break; 3106 3107 case DIF_SUBR_COPYOUT: { 3108 uintptr_t kaddr = tupregs[0].dttk_value; 3109 uintptr_t uaddr = tupregs[1].dttk_value; 3110 uint64_t size = tupregs[2].dttk_value; 3111 3112 if (!dtrace_destructive_disallow && 3113 dtrace_priv_proc_control(state) && 3114 !dtrace_istoxic(kaddr, size)) { 3115 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3116 dtrace_copyout(kaddr, uaddr, size, flags); 3117 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3118 } 3119 break; 3120 } 3121 3122 case DIF_SUBR_COPYOUTSTR: { 3123 uintptr_t kaddr = tupregs[0].dttk_value; 3124 uintptr_t uaddr = tupregs[1].dttk_value; 3125 uint64_t size = tupregs[2].dttk_value; 3126 3127 if (!dtrace_destructive_disallow && 3128 dtrace_priv_proc_control(state) && 3129 !dtrace_istoxic(kaddr, size)) { 3130 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3131 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3132 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3133 } 3134 break; 3135 } 3136 3137 case DIF_SUBR_STRLEN: { 3138 size_t sz; 3139 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3140 sz = dtrace_strlen((char *)addr, 3141 state->dts_options[DTRACEOPT_STRSIZE]); 3142 3143 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3144 regs[rd] = NULL; 3145 break; 3146 } 3147 3148 regs[rd] = sz; 3149 3150 break; 3151 } 3152 3153 case DIF_SUBR_STRCHR: 3154 case 
DIF_SUBR_STRRCHR: { 3155 /* 3156 * We're going to iterate over the string looking for the 3157 * specified character. We will iterate until we have reached 3158 * the string length or we have found the character. If this 3159 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3160 * of the specified character instead of the first. 3161 */ 3162 uintptr_t saddr = tupregs[0].dttk_value; 3163 uintptr_t addr = tupregs[0].dttk_value; 3164 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3165 char c, target = (char)tupregs[1].dttk_value; 3166 3167 for (regs[rd] = NULL; addr < limit; addr++) { 3168 if ((c = dtrace_load8(addr)) == target) { 3169 regs[rd] = addr; 3170 3171 if (subr == DIF_SUBR_STRCHR) 3172 break; 3173 } 3174 3175 if (c == '\0') 3176 break; 3177 } 3178 3179 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3180 regs[rd] = NULL; 3181 break; 3182 } 3183 3184 break; 3185 } 3186 3187 case DIF_SUBR_STRSTR: 3188 case DIF_SUBR_INDEX: 3189 case DIF_SUBR_RINDEX: { 3190 /* 3191 * We're going to iterate over the string looking for the 3192 * specified string. We will iterate until we have reached 3193 * the string length or we have found the string. (Yes, this 3194 * is done in the most naive way possible -- but considering 3195 * that the string we're searching for is likely to be 3196 * relatively short, the complexity of Rabin-Karp or similar 3197 * hardly seems merited.) 3198 */ 3199 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3200 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3201 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3202 size_t len = dtrace_strlen(addr, size); 3203 size_t sublen = dtrace_strlen(substr, size); 3204 char *limit = addr + len, *orig = addr; 3205 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3206 int inc = 1; 3207 3208 regs[rd] = notfound; 3209 3210 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3211 regs[rd] = NULL; 3212 break; 3213 } 3214 3215 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3216 vstate)) { 3217 regs[rd] = NULL; 3218 break; 3219 } 3220 3221 /* 3222 * strstr() and index()/rindex() have similar semantics if 3223 * both strings are the empty string: strstr() returns a 3224 * pointer to the (empty) string, and index() and rindex() 3225 * both return index 0 (regardless of any position argument). 3226 */ 3227 if (sublen == 0 && len == 0) { 3228 if (subr == DIF_SUBR_STRSTR) 3229 regs[rd] = (uintptr_t)addr; 3230 else 3231 regs[rd] = 0; 3232 break; 3233 } 3234 3235 if (subr != DIF_SUBR_STRSTR) { 3236 if (subr == DIF_SUBR_RINDEX) { 3237 limit = orig - 1; 3238 addr += len; 3239 inc = -1; 3240 } 3241 3242 /* 3243 * Both index() and rindex() take an optional position 3244 * argument that denotes the starting position. 3245 */ 3246 if (nargs == 3) { 3247 int64_t pos = (int64_t)tupregs[2].dttk_value; 3248 3249 /* 3250 * If the position argument to index() is 3251 * negative, Perl implicitly clamps it at 3252 * zero. This semantic is a little surprising 3253 * given the special meaning of negative 3254 * positions to similar Perl functions like 3255 * substr(), but it appears to reflect a 3256 * notion that index() can start from a 3257 * negative index and increment its way up to 3258 * the string. Given this notion, Perl's 3259 * rindex() is at least self-consistent in 3260 * that it implicitly clamps positions greater 3261 * than the string length to be the string 3262 * length. 
Where Perl completely loses 3263 * coherence, however, is when the specified 3264 * substring is the empty string (""). In 3265 * this case, even if the position is 3266 * negative, rindex() returns 0 -- and even if 3267 * the position is greater than the length, 3268 * index() returns the string length. These 3269 * semantics violate the notion that index() 3270 * should never return a value less than the 3271 * specified position and that rindex() should 3272 * never return a value greater than the 3273 * specified position. (One assumes that 3274 * these semantics are artifacts of Perl's 3275 * implementation and not the results of 3276 * deliberate design -- it beggars belief that 3277 * even Larry Wall could desire such oddness.) 3278 * While in the abstract one would wish for 3279 * consistent position semantics across 3280 * substr(), index() and rindex() -- or at the 3281 * very least self-consistent position 3282 * semantics for index() and rindex() -- we 3283 * instead opt to keep with the extant Perl 3284 * semantics, in all their broken glory. (Do 3285 * we have more desire to maintain Perl's 3286 * semantics than Perl does? Probably.) 3287 */ 3288 if (subr == DIF_SUBR_RINDEX) { 3289 if (pos < 0) { 3290 if (sublen == 0) 3291 regs[rd] = 0; 3292 break; 3293 } 3294 3295 if (pos > len) 3296 pos = len; 3297 } else { 3298 if (pos < 0) 3299 pos = 0; 3300 3301 if (pos >= len) { 3302 if (sublen == 0) 3303 regs[rd] = len; 3304 break; 3305 } 3306 } 3307 3308 addr = orig + pos; 3309 } 3310 } 3311 3312 for (regs[rd] = notfound; addr != limit; addr += inc) { 3313 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3314 if (subr != DIF_SUBR_STRSTR) { 3315 /* 3316 * As D index() and rindex() are 3317 * modeled on Perl (and not on awk), 3318 * we return a zero-based (and not a 3319 * one-based) index. (For you Perl 3320 * weenies: no, we're not going to add 3321 * $[ -- and shouldn't you be at a con 3322 * or something?) 3323 */ 3324 regs[rd] = (uintptr_t)(addr - orig); 3325 break; 3326 } 3327 3328 ASSERT(subr == DIF_SUBR_STRSTR); 3329 regs[rd] = (uintptr_t)addr; 3330 break; 3331 } 3332 } 3333 3334 break; 3335 } 3336 3337 case DIF_SUBR_STRTOK: { 3338 uintptr_t addr = tupregs[0].dttk_value; 3339 uintptr_t tokaddr = tupregs[1].dttk_value; 3340 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3341 uintptr_t limit, toklimit = tokaddr + size; 3342 uint8_t c, tokmap[32]; /* 256 / 8 */ 3343 char *dest = (char *)mstate->dtms_scratch_ptr; 3344 int i; 3345 3346 /* 3347 * Check both the token buffer and (later) the input buffer, 3348 * since both could be non-scratch addresses. 3349 */ 3350 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3351 regs[rd] = NULL; 3352 break; 3353 } 3354 3355 if (!DTRACE_INSCRATCH(mstate, size)) { 3356 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3357 regs[rd] = NULL; 3358 break; 3359 } 3360 3361 if (addr == NULL) { 3362 /* 3363 * If the address specified is NULL, we use our saved 3364 * strtok pointer from the mstate. Note that this 3365 * means that the saved strtok pointer is _only_ 3366 * valid within multiple enablings of the same probe -- 3367 * it behaves like an implicit clause-local variable. 3368 */ 3369 addr = mstate->dtms_strtok; 3370 } else { 3371 /* 3372 * If the user-specified address is non-NULL we must 3373 * access check it. 
This is the only time we have 3374 * a chance to do so, since this address may reside 3375 * in the string table of this clause-- future calls 3376 * (when we fetch addr from mstate->dtms_strtok) 3377 * would fail this access check. 3378 */ 3379 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3380 regs[rd] = NULL; 3381 break; 3382 } 3383 } 3384 3385 /* 3386 * First, zero the token map, and then process the token 3387 * string -- setting a bit in the map for every character 3388 * found in the token string. 3389 */ 3390 for (i = 0; i < sizeof (tokmap); i++) 3391 tokmap[i] = 0; 3392 3393 for (; tokaddr < toklimit; tokaddr++) { 3394 if ((c = dtrace_load8(tokaddr)) == '\0') 3395 break; 3396 3397 ASSERT((c >> 3) < sizeof (tokmap)); 3398 tokmap[c >> 3] |= (1 << (c & 0x7)); 3399 } 3400 3401 for (limit = addr + size; addr < limit; addr++) { 3402 /* 3403 * We're looking for a character that is _not_ contained 3404 * in the token string. 3405 */ 3406 if ((c = dtrace_load8(addr)) == '\0') 3407 break; 3408 3409 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3410 break; 3411 } 3412 3413 if (c == '\0') { 3414 /* 3415 * We reached the end of the string without finding 3416 * any character that was not in the token string. 3417 * We return NULL in this case, and we set the saved 3418 * address to NULL as well. 3419 */ 3420 regs[rd] = NULL; 3421 mstate->dtms_strtok = NULL; 3422 break; 3423 } 3424 3425 /* 3426 * From here on, we're copying into the destination string. 3427 */ 3428 for (i = 0; addr < limit && i < size - 1; addr++) { 3429 if ((c = dtrace_load8(addr)) == '\0') 3430 break; 3431 3432 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3433 break; 3434 3435 ASSERT(i < size); 3436 dest[i++] = c; 3437 } 3438 3439 ASSERT(i < size); 3440 dest[i] = '\0'; 3441 regs[rd] = (uintptr_t)dest; 3442 mstate->dtms_scratch_ptr += size; 3443 mstate->dtms_strtok = addr; 3444 break; 3445 } 3446 3447 case DIF_SUBR_SUBSTR: { 3448 uintptr_t s = tupregs[0].dttk_value; 3449 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3450 char *d = (char *)mstate->dtms_scratch_ptr; 3451 int64_t index = (int64_t)tupregs[1].dttk_value; 3452 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3453 size_t len = dtrace_strlen((char *)s, size); 3454 int64_t i = 0; 3455 3456 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3457 regs[rd] = NULL; 3458 break; 3459 } 3460 3461 if (nargs <= 2) 3462 remaining = (int64_t)size; 3463 3464 if (!DTRACE_INSCRATCH(mstate, size)) { 3465 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3466 regs[rd] = NULL; 3467 break; 3468 } 3469 3470 if (index < 0) { 3471 index += len; 3472 3473 if (index < 0 && index + remaining > 0) { 3474 remaining += index; 3475 index = 0; 3476 } 3477 } 3478 3479 if (index >= len || index < 0) 3480 index = len; 3481 3482 for (d[0] = '\0'; remaining > 0; remaining--) { 3483 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 3484 break; 3485 3486 if (i == size) { 3487 d[i - 1] = '\0'; 3488 break; 3489 } 3490 } 3491 3492 mstate->dtms_scratch_ptr += size; 3493 regs[rd] = (uintptr_t)d; 3494 break; 3495 } 3496 3497 case DIF_SUBR_GETMAJOR: 3498 #ifdef _LP64 3499 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3500 #else 3501 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3502 #endif 3503 break; 3504 3505 case DIF_SUBR_GETMINOR: 3506 #ifdef _LP64 3507 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3508 #else 3509 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3510 #endif 3511 break; 3512 3513 case DIF_SUBR_DDI_PATHNAME: { 3514 /* 3515 * This one is a galactic mess. 
We are going to roughly 3516 * emulate ddi_pathname(), but it's made more complicated 3517 * by the fact that we (a) want to include the minor name and 3518 * (b) must proceed iteratively instead of recursively. 3519 */ 3520 uintptr_t dest = mstate->dtms_scratch_ptr; 3521 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3522 char *start = (char *)dest, *end = start + size - 1; 3523 uintptr_t daddr = tupregs[0].dttk_value; 3524 int64_t minor = (int64_t)tupregs[1].dttk_value; 3525 char *s; 3526 int i, len, depth = 0; 3527 3528 /* 3529 * Due to all the pointer jumping we do and context we must 3530 * rely upon, we just mandate that the user must have kernel 3531 * read privileges to use this routine. 3532 */ 3533 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3534 *flags |= CPU_DTRACE_KPRIV; 3535 *illval = daddr; 3536 regs[rd] = NULL; 3537 } 3538 3539 if (!DTRACE_INSCRATCH(mstate, size)) { 3540 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3541 regs[rd] = NULL; 3542 break; 3543 } 3544 3545 *end = '\0'; 3546 3547 /* 3548 * We want to have a name for the minor. In order to do this, 3549 * we need to walk the minor list from the devinfo. We want 3550 * to be sure that we don't infinitely walk a circular list, 3551 * so we check for circularity by sending a scout pointer 3552 * ahead two elements for every element that we iterate over; 3553 * if the list is circular, these will ultimately point to the 3554 * same element. You may recognize this little trick as the 3555 * answer to a stupid interview question -- one that always 3556 * seems to be asked by those who had to have it laboriously 3557 * explained to them, and who can't even concisely describe 3558 * the conditions under which one would be forced to resort to 3559 * this technique. Needless to say, those conditions are 3560 * found here -- and probably only here. Is this the only 3561 * use of this infamous trick in shipping, production code? 3562 * If it isn't, it probably should be... 3563 */ 3564 if (minor != -1) { 3565 uintptr_t maddr = dtrace_loadptr(daddr + 3566 offsetof(struct dev_info, devi_minor)); 3567 3568 uintptr_t next = offsetof(struct ddi_minor_data, next); 3569 uintptr_t name = offsetof(struct ddi_minor_data, 3570 d_minor) + offsetof(struct ddi_minor, name); 3571 uintptr_t dev = offsetof(struct ddi_minor_data, 3572 d_minor) + offsetof(struct ddi_minor, dev); 3573 uintptr_t scout; 3574 3575 if (maddr != NULL) 3576 scout = dtrace_loadptr(maddr + next); 3577 3578 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3579 uint64_t m; 3580 #ifdef _LP64 3581 m = dtrace_load64(maddr + dev) & MAXMIN64; 3582 #else 3583 m = dtrace_load32(maddr + dev) & MAXMIN; 3584 #endif 3585 if (m != minor) { 3586 maddr = dtrace_loadptr(maddr + next); 3587 3588 if (scout == NULL) 3589 continue; 3590 3591 scout = dtrace_loadptr(scout + next); 3592 3593 if (scout == NULL) 3594 continue; 3595 3596 scout = dtrace_loadptr(scout + next); 3597 3598 if (scout == NULL) 3599 continue; 3600 3601 if (scout == maddr) { 3602 *flags |= CPU_DTRACE_ILLOP; 3603 break; 3604 } 3605 3606 continue; 3607 } 3608 3609 /* 3610 * We have the minor data. Now we need to 3611 * copy the minor's name into the end of the 3612 * pathname.
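 *
 * (The pathname is assembled right to left: the lines below
 * reserve len + 1 bytes at the tail of the scratch buffer,
 * write the ':' separator, and copy the minor name in after
 * it; the outer loop over devi_parent then prepends each "@"
 * unit-address, node name and '/' to its left. Illustratively,
 * for a minor name "a" the buffer ends in ":a" and grows
 * leftward from there.)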
3613 */ 3614 s = (char *)dtrace_loadptr(maddr + name); 3615 len = dtrace_strlen(s, size); 3616 3617 if (*flags & CPU_DTRACE_FAULT) 3618 break; 3619 3620 if (len != 0) { 3621 if ((end -= (len + 1)) < start) 3622 break; 3623 3624 *end = ':'; 3625 } 3626 3627 for (i = 1; i <= len; i++) 3628 end[i] = dtrace_load8((uintptr_t)s++); 3629 break; 3630 } 3631 } 3632 3633 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3634 ddi_node_state_t devi_state; 3635 3636 devi_state = dtrace_load32(daddr + 3637 offsetof(struct dev_info, devi_node_state)); 3638 3639 if (*flags & CPU_DTRACE_FAULT) 3640 break; 3641 3642 if (devi_state >= DS_INITIALIZED) { 3643 s = (char *)dtrace_loadptr(daddr + 3644 offsetof(struct dev_info, devi_addr)); 3645 len = dtrace_strlen(s, size); 3646 3647 if (*flags & CPU_DTRACE_FAULT) 3648 break; 3649 3650 if (len != 0) { 3651 if ((end -= (len + 1)) < start) 3652 break; 3653 3654 *end = '@'; 3655 } 3656 3657 for (i = 1; i <= len; i++) 3658 end[i] = dtrace_load8((uintptr_t)s++); 3659 } 3660 3661 /* 3662 * Now for the node name... 3663 */ 3664 s = (char *)dtrace_loadptr(daddr + 3665 offsetof(struct dev_info, devi_node_name)); 3666 3667 daddr = dtrace_loadptr(daddr + 3668 offsetof(struct dev_info, devi_parent)); 3669 3670 /* 3671 * If our parent is NULL (that is, if we're the root 3672 * node), we're going to use the special path 3673 * "devices". 3674 */ 3675 if (daddr == NULL) 3676 s = "devices"; 3677 3678 len = dtrace_strlen(s, size); 3679 if (*flags & CPU_DTRACE_FAULT) 3680 break; 3681 3682 if ((end -= (len + 1)) < start) 3683 break; 3684 3685 for (i = 1; i <= len; i++) 3686 end[i] = dtrace_load8((uintptr_t)s++); 3687 *end = '/'; 3688 3689 if (depth++ > dtrace_devdepth_max) { 3690 *flags |= CPU_DTRACE_ILLOP; 3691 break; 3692 } 3693 } 3694 3695 if (end < start) 3696 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3697 3698 if (daddr == NULL) { 3699 regs[rd] = (uintptr_t)end; 3700 mstate->dtms_scratch_ptr += size; 3701 } 3702 3703 break; 3704 } 3705 3706 case DIF_SUBR_STRJOIN: { 3707 char *d = (char *)mstate->dtms_scratch_ptr; 3708 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3709 uintptr_t s1 = tupregs[0].dttk_value; 3710 uintptr_t s2 = tupregs[1].dttk_value; 3711 int i = 0; 3712 3713 if (!dtrace_strcanload(s1, size, mstate, vstate) || 3714 !dtrace_strcanload(s2, size, mstate, vstate)) { 3715 regs[rd] = NULL; 3716 break; 3717 } 3718 3719 if (!DTRACE_INSCRATCH(mstate, size)) { 3720 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3721 regs[rd] = NULL; 3722 break; 3723 } 3724 3725 for (;;) { 3726 if (i >= size) { 3727 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3728 regs[rd] = NULL; 3729 break; 3730 } 3731 3732 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3733 i--; 3734 break; 3735 } 3736 } 3737 3738 for (;;) { 3739 if (i >= size) { 3740 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3741 regs[rd] = NULL; 3742 break; 3743 } 3744 3745 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3746 break; 3747 } 3748 3749 if (i < size) { 3750 mstate->dtms_scratch_ptr += i; 3751 regs[rd] = (uintptr_t)d; 3752 } 3753 3754 break; 3755 } 3756 3757 case DIF_SUBR_LLTOSTR: { 3758 int64_t i = (int64_t)tupregs[0].dttk_value; 3759 int64_t val = i < 0 ? 
i * -1 : i; 3760 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3761 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3762 3763 if (!DTRACE_INSCRATCH(mstate, size)) { 3764 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3765 regs[rd] = NULL; 3766 break; 3767 } 3768 3769 for (*end-- = '\0'; val; val /= 10) 3770 *end-- = '0' + (val % 10); 3771 3772 if (i == 0) 3773 *end-- = '0'; 3774 3775 if (i < 0) 3776 *end-- = '-'; 3777 3778 regs[rd] = (uintptr_t)end + 1; 3779 mstate->dtms_scratch_ptr += size; 3780 break; 3781 } 3782 3783 case DIF_SUBR_HTONS: 3784 case DIF_SUBR_NTOHS: 3785 #ifdef _BIG_ENDIAN 3786 regs[rd] = (uint16_t)tupregs[0].dttk_value; 3787 #else 3788 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 3789 #endif 3790 break; 3791 3792 3793 case DIF_SUBR_HTONL: 3794 case DIF_SUBR_NTOHL: 3795 #ifdef _BIG_ENDIAN 3796 regs[rd] = (uint32_t)tupregs[0].dttk_value; 3797 #else 3798 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 3799 #endif 3800 break; 3801 3802 3803 case DIF_SUBR_HTONLL: 3804 case DIF_SUBR_NTOHLL: 3805 #ifdef _BIG_ENDIAN 3806 regs[rd] = (uint64_t)tupregs[0].dttk_value; 3807 #else 3808 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 3809 #endif 3810 break; 3811 3812 3813 case DIF_SUBR_DIRNAME: 3814 case DIF_SUBR_BASENAME: { 3815 char *dest = (char *)mstate->dtms_scratch_ptr; 3816 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3817 uintptr_t src = tupregs[0].dttk_value; 3818 int i, j, len = dtrace_strlen((char *)src, size); 3819 int lastbase = -1, firstbase = -1, lastdir = -1; 3820 int start, end; 3821 3822 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 3823 regs[rd] = NULL; 3824 break; 3825 } 3826 3827 if (!DTRACE_INSCRATCH(mstate, size)) { 3828 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3829 regs[rd] = NULL; 3830 break; 3831 } 3832 3833 /* 3834 * The basename and dirname for a zero-length string is 3835 * defined to be "." 3836 */ 3837 if (len == 0) { 3838 len = 1; 3839 src = (uintptr_t)"."; 3840 } 3841 3842 /* 3843 * Start from the back of the string, moving back toward the 3844 * front until we see a character that isn't a slash. That 3845 * character is the last character in the basename. 3846 */ 3847 for (i = len - 1; i >= 0; i--) { 3848 if (dtrace_load8(src + i) != '/') 3849 break; 3850 } 3851 3852 if (i >= 0) 3853 lastbase = i; 3854 3855 /* 3856 * Starting from the last character in the basename, move 3857 * towards the front until we find a slash. The character 3858 * that we processed immediately before that is the first 3859 * character in the basename. 3860 */ 3861 for (; i >= 0; i--) { 3862 if (dtrace_load8(src + i) == '/') 3863 break; 3864 } 3865 3866 if (i >= 0) 3867 firstbase = i + 1; 3868 3869 /* 3870 * Now keep going until we find a non-slash character. That 3871 * character is the last character in the dirname. 3872 */ 3873 for (; i >= 0; i--) { 3874 if (dtrace_load8(src + i) != '/') 3875 break; 3876 } 3877 3878 if (i >= 0) 3879 lastdir = i; 3880 3881 ASSERT(!(lastbase == -1 && firstbase != -1)); 3882 ASSERT(!(firstbase == -1 && lastdir != -1)); 3883 3884 if (lastbase == -1) { 3885 /* 3886 * We didn't find a non-slash character. We know that 3887 * the length is non-zero, so the whole string must be 3888 * slashes. In either the dirname or the basename 3889 * case, we return '/'. 3890 */ 3891 ASSERT(firstbase == -1); 3892 firstbase = lastbase = lastdir = 0; 3893 } 3894 3895 if (firstbase == -1) { 3896 /* 3897 * The entire string consists only of a basename 3898 * component. 
If we're looking for dirname, we need 3899 * to change our string to be just "."; if we're 3900 * looking for a basename, we'll just set the first 3901 * character of the basename to be 0. 3902 */ 3903 if (subr == DIF_SUBR_DIRNAME) { 3904 ASSERT(lastdir == -1); 3905 src = (uintptr_t)"."; 3906 lastdir = 0; 3907 } else { 3908 firstbase = 0; 3909 } 3910 } 3911 3912 if (subr == DIF_SUBR_DIRNAME) { 3913 if (lastdir == -1) { 3914 /* 3915 * We know that we have a slash in the name -- 3916 * or lastdir would be set to 0, above. And 3917 * because lastdir is -1, we know that this 3918 * slash must be the first character. (That 3919 * is, the full string must be of the form 3920 * "/basename".) In this case, the last 3921 * character of the directory name is 0. 3922 */ 3923 lastdir = 0; 3924 } 3925 3926 start = 0; 3927 end = lastdir; 3928 } else { 3929 ASSERT(subr == DIF_SUBR_BASENAME); 3930 ASSERT(firstbase != -1 && lastbase != -1); 3931 start = firstbase; 3932 end = lastbase; 3933 } 3934 3935 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3936 dest[j] = dtrace_load8(src + i); 3937 3938 dest[j] = '\0'; 3939 regs[rd] = (uintptr_t)dest; 3940 mstate->dtms_scratch_ptr += size; 3941 break; 3942 } 3943 3944 case DIF_SUBR_CLEANPATH: { 3945 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3946 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3947 uintptr_t src = tupregs[0].dttk_value; 3948 int i = 0, j = 0; 3949 3950 if (!dtrace_strcanload(src, size, mstate, vstate)) { 3951 regs[rd] = NULL; 3952 break; 3953 } 3954 3955 if (!DTRACE_INSCRATCH(mstate, size)) { 3956 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3957 regs[rd] = NULL; 3958 break; 3959 } 3960 3961 /* 3962 * Move forward, loading each character. 3963 */ 3964 do { 3965 c = dtrace_load8(src + i++); 3966 next: 3967 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3968 break; 3969 3970 if (c != '/') { 3971 dest[j++] = c; 3972 continue; 3973 } 3974 3975 c = dtrace_load8(src + i++); 3976 3977 if (c == '/') { 3978 /* 3979 * We have two slashes -- we can just advance 3980 * to the next character. 3981 */ 3982 goto next; 3983 } 3984 3985 if (c != '.') { 3986 /* 3987 * This is not "." and it's not ".." -- we can 3988 * just store the "/" and this character and 3989 * drive on. 3990 */ 3991 dest[j++] = '/'; 3992 dest[j++] = c; 3993 continue; 3994 } 3995 3996 c = dtrace_load8(src + i++); 3997 3998 if (c == '/') { 3999 /* 4000 * This is a "/./" component. We're not going 4001 * to store anything in the destination buffer; 4002 * we're just going to go to the next component. 4003 */ 4004 goto next; 4005 } 4006 4007 if (c != '.') { 4008 /* 4009 * This is not ".." -- we can just store the 4010 * "/." and this character and continue 4011 * processing. 4012 */ 4013 dest[j++] = '/'; 4014 dest[j++] = '.'; 4015 dest[j++] = c; 4016 continue; 4017 } 4018 4019 c = dtrace_load8(src + i++); 4020 4021 if (c != '/' && c != '\0') { 4022 /* 4023 * This is not ".." -- it's "..[mumble]". 4024 * We'll store the "/.." and this character 4025 * and continue processing. 4026 */ 4027 dest[j++] = '/'; 4028 dest[j++] = '.'; 4029 dest[j++] = '.'; 4030 dest[j++] = c; 4031 continue; 4032 } 4033 4034 /* 4035 * This is "/../" or "/..\0". We need to back up 4036 * our destination pointer until we find a "/". 
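 *
 * (For illustration -- a purely lexical sketch, not a statement
 * about any real filesystem: "/foo/bar/../baz" should clean to
 * "/foo/baz", and "/foo//bar/./baz" to "/foo/bar/baz".)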
4037 */ 4038 i--; 4039 while (j != 0 && dest[--j] != '/') 4040 continue; 4041 4042 if (c == '\0') 4043 dest[++j] = '/'; 4044 } while (c != '\0'); 4045 4046 dest[j] = '\0'; 4047 regs[rd] = (uintptr_t)dest; 4048 mstate->dtms_scratch_ptr += size; 4049 break; 4050 } 4051 4052 case DIF_SUBR_INET_NTOA: 4053 case DIF_SUBR_INET_NTOA6: 4054 case DIF_SUBR_INET_NTOP: { 4055 size_t size; 4056 int af, argi, i; 4057 char *base, *end; 4058 4059 if (subr == DIF_SUBR_INET_NTOP) { 4060 af = (int)tupregs[0].dttk_value; 4061 argi = 1; 4062 } else { 4063 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4064 argi = 0; 4065 } 4066 4067 if (af == AF_INET) { 4068 ipaddr_t ip4; 4069 uint8_t *ptr8, val; 4070 4071 /* 4072 * Safely load the IPv4 address. 4073 */ 4074 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4075 4076 /* 4077 * Check an IPv4 string will fit in scratch. 4078 */ 4079 size = INET_ADDRSTRLEN; 4080 if (!DTRACE_INSCRATCH(mstate, size)) { 4081 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4082 regs[rd] = NULL; 4083 break; 4084 } 4085 base = (char *)mstate->dtms_scratch_ptr; 4086 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4087 4088 /* 4089 * Stringify as a dotted decimal quad. 4090 */ 4091 *end-- = '\0'; 4092 ptr8 = (uint8_t *)&ip4; 4093 for (i = 3; i >= 0; i--) { 4094 val = ptr8[i]; 4095 4096 if (val == 0) { 4097 *end-- = '0'; 4098 } else { 4099 for (; val; val /= 10) { 4100 *end-- = '0' + (val % 10); 4101 } 4102 } 4103 4104 if (i > 0) 4105 *end-- = '.'; 4106 } 4107 ASSERT(end + 1 >= base); 4108 4109 } else if (af == AF_INET6) { 4110 struct in6_addr ip6; 4111 int firstzero, tryzero, numzero, v6end; 4112 uint16_t val; 4113 const char digits[] = "0123456789abcdef"; 4114 4115 /* 4116 * Stringify using RFC 1884 convention 2 - 16 bit 4117 * hexadecimal values with a zero-run compression. 4118 * Lower case hexadecimal digits are used. 4119 * eg, fe80::214:4fff:fe0b:76c8. 4120 * The IPv4 embedded form is returned for inet_ntop, 4121 * just the IPv4 string is returned for inet_ntoa6. 4122 */ 4123 4124 /* 4125 * Safely load the IPv6 address. 4126 */ 4127 dtrace_bcopy( 4128 (void *)(uintptr_t)tupregs[argi].dttk_value, 4129 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4130 4131 /* 4132 * Check an IPv6 string will fit in scratch. 4133 */ 4134 size = INET6_ADDRSTRLEN; 4135 if (!DTRACE_INSCRATCH(mstate, size)) { 4136 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4137 regs[rd] = NULL; 4138 break; 4139 } 4140 base = (char *)mstate->dtms_scratch_ptr; 4141 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4142 *end-- = '\0'; 4143 4144 /* 4145 * Find the longest run of 16 bit zero values 4146 * for the single allowed zero compression - "::". 4147 */ 4148 firstzero = -1; 4149 tryzero = -1; 4150 numzero = 1; 4151 for (i = 0; i < sizeof (struct in6_addr); i++) { 4152 if (ip6._S6_un._S6_u8[i] == 0 && 4153 tryzero == -1 && i % 2 == 0) { 4154 tryzero = i; 4155 continue; 4156 } 4157 4158 if (tryzero != -1 && 4159 (ip6._S6_un._S6_u8[i] != 0 || 4160 i == sizeof (struct in6_addr) - 1)) { 4161 4162 if (i - tryzero <= numzero) { 4163 tryzero = -1; 4164 continue; 4165 } 4166 4167 firstzero = tryzero; 4168 numzero = i - i % 2 - tryzero; 4169 tryzero = -1; 4170 4171 if (ip6._S6_un._S6_u8[i] == 0 && 4172 i == sizeof (struct in6_addr) - 1) 4173 numzero += 2; 4174 } 4175 } 4176 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4177 4178 /* 4179 * Check for an IPv4 embedded address. 
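 *
 * (For example, a v4-mapped address would be rendered with its IPv4
 * tail in dotted-quad form -- "::ffff:10.1.2.3" from inet_ntop() and
 * just "10.1.2.3" from inet_ntoa6(); an illustrative sketch, not an
 * exhaustive description.)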
4180 */ 4181 v6end = sizeof (struct in6_addr) - 2; 4182 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4183 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4184 for (i = sizeof (struct in6_addr) - 1; 4185 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4186 ASSERT(end >= base); 4187 4188 val = ip6._S6_un._S6_u8[i]; 4189 4190 if (val == 0) { 4191 *end-- = '0'; 4192 } else { 4193 for (; val; val /= 10) { 4194 *end-- = '0' + val % 10; 4195 } 4196 } 4197 4198 if (i > DTRACE_V4MAPPED_OFFSET) 4199 *end-- = '.'; 4200 } 4201 4202 if (subr == DIF_SUBR_INET_NTOA6) 4203 goto inetout; 4204 4205 /* 4206 * Set v6end to skip the IPv4 address that 4207 * we have already stringified. 4208 */ 4209 v6end = 10; 4210 } 4211 4212 /* 4213 * Build the IPv6 string by working through the 4214 * address in reverse. 4215 */ 4216 for (i = v6end; i >= 0; i -= 2) { 4217 ASSERT(end >= base); 4218 4219 if (i == firstzero + numzero - 2) { 4220 *end-- = ':'; 4221 *end-- = ':'; 4222 i -= numzero - 2; 4223 continue; 4224 } 4225 4226 if (i < 14 && i != firstzero - 2) 4227 *end-- = ':'; 4228 4229 val = (ip6._S6_un._S6_u8[i] << 8) + 4230 ip6._S6_un._S6_u8[i + 1]; 4231 4232 if (val == 0) { 4233 *end-- = '0'; 4234 } else { 4235 for (; val; val /= 16) { 4236 *end-- = digits[val % 16]; 4237 } 4238 } 4239 } 4240 ASSERT(end + 1 >= base); 4241 4242 } else { 4243 /* 4244 * The user didn't use AH_INET or AH_INET6. 4245 */ 4246 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4247 regs[rd] = NULL; 4248 break; 4249 } 4250 4251 inetout: regs[rd] = (uintptr_t)end + 1; 4252 mstate->dtms_scratch_ptr += size; 4253 break; 4254 } 4255 4256 } 4257 } 4258 4259 /* 4260 * Emulate the execution of DTrace IR instructions specified by the given 4261 * DIF object. This function is deliberately void of assertions as all of 4262 * the necessary checks are handled by a call to dtrace_difo_validate(). 4263 */ 4264 static uint64_t 4265 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4266 dtrace_vstate_t *vstate, dtrace_state_t *state) 4267 { 4268 const dif_instr_t *text = difo->dtdo_buf; 4269 const uint_t textlen = difo->dtdo_len; 4270 const char *strtab = difo->dtdo_strtab; 4271 const uint64_t *inttab = difo->dtdo_inttab; 4272 4273 uint64_t rval = 0; 4274 dtrace_statvar_t *svar; 4275 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4276 dtrace_difv_t *v; 4277 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4278 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4279 4280 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4281 uint64_t regs[DIF_DIR_NREGS]; 4282 uint64_t *tmp; 4283 4284 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4285 int64_t cc_r; 4286 uint_t pc = 0, id, opc; 4287 uint8_t ttop = 0; 4288 dif_instr_t instr; 4289 uint_t r1, r2, rd; 4290 4291 /* 4292 * We stash the current DIF object into the machine state: we need it 4293 * for subsequent access checking. 
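 *
 * (As a purely illustrative sketch of what the emulator consumes --
 * not actual "dtrace -S" output -- a predicate such as /pid == 1234/
 * lowers to a short sequence along the lines of "ldgs pid, %r1;
 * setx [1234], %r2; cmp %r1, %r2; ...; ret", which the loop below
 * steps through one dif_instr_t at a time.)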
4294 */ 4295 mstate->dtms_difo = difo; 4296 4297 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4298 4299 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4300 opc = pc; 4301 4302 instr = text[pc++]; 4303 r1 = DIF_INSTR_R1(instr); 4304 r2 = DIF_INSTR_R2(instr); 4305 rd = DIF_INSTR_RD(instr); 4306 4307 switch (DIF_INSTR_OP(instr)) { 4308 case DIF_OP_OR: 4309 regs[rd] = regs[r1] | regs[r2]; 4310 break; 4311 case DIF_OP_XOR: 4312 regs[rd] = regs[r1] ^ regs[r2]; 4313 break; 4314 case DIF_OP_AND: 4315 regs[rd] = regs[r1] & regs[r2]; 4316 break; 4317 case DIF_OP_SLL: 4318 regs[rd] = regs[r1] << regs[r2]; 4319 break; 4320 case DIF_OP_SRL: 4321 regs[rd] = regs[r1] >> regs[r2]; 4322 break; 4323 case DIF_OP_SUB: 4324 regs[rd] = regs[r1] - regs[r2]; 4325 break; 4326 case DIF_OP_ADD: 4327 regs[rd] = regs[r1] + regs[r2]; 4328 break; 4329 case DIF_OP_MUL: 4330 regs[rd] = regs[r1] * regs[r2]; 4331 break; 4332 case DIF_OP_SDIV: 4333 if (regs[r2] == 0) { 4334 regs[rd] = 0; 4335 *flags |= CPU_DTRACE_DIVZERO; 4336 } else { 4337 regs[rd] = (int64_t)regs[r1] / 4338 (int64_t)regs[r2]; 4339 } 4340 break; 4341 4342 case DIF_OP_UDIV: 4343 if (regs[r2] == 0) { 4344 regs[rd] = 0; 4345 *flags |= CPU_DTRACE_DIVZERO; 4346 } else { 4347 regs[rd] = regs[r1] / regs[r2]; 4348 } 4349 break; 4350 4351 case DIF_OP_SREM: 4352 if (regs[r2] == 0) { 4353 regs[rd] = 0; 4354 *flags |= CPU_DTRACE_DIVZERO; 4355 } else { 4356 regs[rd] = (int64_t)regs[r1] % 4357 (int64_t)regs[r2]; 4358 } 4359 break; 4360 4361 case DIF_OP_UREM: 4362 if (regs[r2] == 0) { 4363 regs[rd] = 0; 4364 *flags |= CPU_DTRACE_DIVZERO; 4365 } else { 4366 regs[rd] = regs[r1] % regs[r2]; 4367 } 4368 break; 4369 4370 case DIF_OP_NOT: 4371 regs[rd] = ~regs[r1]; 4372 break; 4373 case DIF_OP_MOV: 4374 regs[rd] = regs[r1]; 4375 break; 4376 case DIF_OP_CMP: 4377 cc_r = regs[r1] - regs[r2]; 4378 cc_n = cc_r < 0; 4379 cc_z = cc_r == 0; 4380 cc_v = 0; 4381 cc_c = regs[r1] < regs[r2]; 4382 break; 4383 case DIF_OP_TST: 4384 cc_n = cc_v = cc_c = 0; 4385 cc_z = regs[r1] == 0; 4386 break; 4387 case DIF_OP_BA: 4388 pc = DIF_INSTR_LABEL(instr); 4389 break; 4390 case DIF_OP_BE: 4391 if (cc_z) 4392 pc = DIF_INSTR_LABEL(instr); 4393 break; 4394 case DIF_OP_BNE: 4395 if (cc_z == 0) 4396 pc = DIF_INSTR_LABEL(instr); 4397 break; 4398 case DIF_OP_BG: 4399 if ((cc_z | (cc_n ^ cc_v)) == 0) 4400 pc = DIF_INSTR_LABEL(instr); 4401 break; 4402 case DIF_OP_BGU: 4403 if ((cc_c | cc_z) == 0) 4404 pc = DIF_INSTR_LABEL(instr); 4405 break; 4406 case DIF_OP_BGE: 4407 if ((cc_n ^ cc_v) == 0) 4408 pc = DIF_INSTR_LABEL(instr); 4409 break; 4410 case DIF_OP_BGEU: 4411 if (cc_c == 0) 4412 pc = DIF_INSTR_LABEL(instr); 4413 break; 4414 case DIF_OP_BL: 4415 if (cc_n ^ cc_v) 4416 pc = DIF_INSTR_LABEL(instr); 4417 break; 4418 case DIF_OP_BLU: 4419 if (cc_c) 4420 pc = DIF_INSTR_LABEL(instr); 4421 break; 4422 case DIF_OP_BLE: 4423 if (cc_z | (cc_n ^ cc_v)) 4424 pc = DIF_INSTR_LABEL(instr); 4425 break; 4426 case DIF_OP_BLEU: 4427 if (cc_c | cc_z) 4428 pc = DIF_INSTR_LABEL(instr); 4429 break; 4430 case DIF_OP_RLDSB: 4431 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4432 *flags |= CPU_DTRACE_KPRIV; 4433 *illval = regs[r1]; 4434 break; 4435 } 4436 /*FALLTHROUGH*/ 4437 case DIF_OP_LDSB: 4438 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4439 break; 4440 case DIF_OP_RLDSH: 4441 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4442 *flags |= CPU_DTRACE_KPRIV; 4443 *illval = regs[r1]; 4444 break; 4445 } 4446 /*FALLTHROUGH*/ 4447 case DIF_OP_LDSH: 4448 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4449 break; 
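/*
 * As with DIF_OP_RLDSB above, each of the remaining R-prefixed loads
 * first verifies via dtrace_canstore() that the source address lies
 * within DTrace-owned memory (scratch, dynamic variable space and the
 * like); if it does not, CPU_DTRACE_KPRIV is set and the offending
 * address is recorded, and otherwise the code falls through to the
 * corresponding ordinary load.
 */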
4450 case DIF_OP_RLDSW: 4451 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4452 *flags |= CPU_DTRACE_KPRIV; 4453 *illval = regs[r1]; 4454 break; 4455 } 4456 /*FALLTHROUGH*/ 4457 case DIF_OP_LDSW: 4458 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4459 break; 4460 case DIF_OP_RLDUB: 4461 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4462 *flags |= CPU_DTRACE_KPRIV; 4463 *illval = regs[r1]; 4464 break; 4465 } 4466 /*FALLTHROUGH*/ 4467 case DIF_OP_LDUB: 4468 regs[rd] = dtrace_load8(regs[r1]); 4469 break; 4470 case DIF_OP_RLDUH: 4471 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4472 *flags |= CPU_DTRACE_KPRIV; 4473 *illval = regs[r1]; 4474 break; 4475 } 4476 /*FALLTHROUGH*/ 4477 case DIF_OP_LDUH: 4478 regs[rd] = dtrace_load16(regs[r1]); 4479 break; 4480 case DIF_OP_RLDUW: 4481 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4482 *flags |= CPU_DTRACE_KPRIV; 4483 *illval = regs[r1]; 4484 break; 4485 } 4486 /*FALLTHROUGH*/ 4487 case DIF_OP_LDUW: 4488 regs[rd] = dtrace_load32(regs[r1]); 4489 break; 4490 case DIF_OP_RLDX: 4491 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4492 *flags |= CPU_DTRACE_KPRIV; 4493 *illval = regs[r1]; 4494 break; 4495 } 4496 /*FALLTHROUGH*/ 4497 case DIF_OP_LDX: 4498 regs[rd] = dtrace_load64(regs[r1]); 4499 break; 4500 case DIF_OP_ULDSB: 4501 regs[rd] = (int8_t) 4502 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4503 break; 4504 case DIF_OP_ULDSH: 4505 regs[rd] = (int16_t) 4506 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4507 break; 4508 case DIF_OP_ULDSW: 4509 regs[rd] = (int32_t) 4510 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4511 break; 4512 case DIF_OP_ULDUB: 4513 regs[rd] = 4514 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4515 break; 4516 case DIF_OP_ULDUH: 4517 regs[rd] = 4518 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4519 break; 4520 case DIF_OP_ULDUW: 4521 regs[rd] = 4522 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4523 break; 4524 case DIF_OP_ULDX: 4525 regs[rd] = 4526 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4527 break; 4528 case DIF_OP_RET: 4529 rval = regs[rd]; 4530 break; 4531 case DIF_OP_NOP: 4532 break; 4533 case DIF_OP_SETX: 4534 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4535 break; 4536 case DIF_OP_SETS: 4537 regs[rd] = (uint64_t)(uintptr_t) 4538 (strtab + DIF_INSTR_STRING(instr)); 4539 break; 4540 case DIF_OP_SCMP: { 4541 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 4542 uintptr_t s1 = regs[r1]; 4543 uintptr_t s2 = regs[r2]; 4544 4545 if (s1 != NULL && 4546 !dtrace_strcanload(s1, sz, mstate, vstate)) 4547 break; 4548 if (s2 != NULL && 4549 !dtrace_strcanload(s2, sz, mstate, vstate)) 4550 break; 4551 4552 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 4553 4554 cc_n = cc_r < 0; 4555 cc_z = cc_r == 0; 4556 cc_v = cc_c = 0; 4557 break; 4558 } 4559 case DIF_OP_LDGA: 4560 regs[rd] = dtrace_dif_variable(mstate, state, 4561 r1, regs[r2]); 4562 break; 4563 case DIF_OP_LDGS: 4564 id = DIF_INSTR_VAR(instr); 4565 4566 if (id >= DIF_VAR_OTHER_UBASE) { 4567 uintptr_t a; 4568 4569 id -= DIF_VAR_OTHER_UBASE; 4570 svar = vstate->dtvs_globals[id]; 4571 ASSERT(svar != NULL); 4572 v = &svar->dtsv_var; 4573 4574 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4575 regs[rd] = svar->dtsv_data; 4576 break; 4577 } 4578 4579 a = (uintptr_t)svar->dtsv_data; 4580 4581 if (*(uint8_t *)a == UINT8_MAX) { 4582 /* 4583 * If the 0th byte is set to UINT8_MAX 4584 * then this is to be treated as a 4585 * reference to a NULL variable. 
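 *
 * (That is, a by-reference global's backing store is laid out as a
 * uint64_t-sized header -- whose first byte doubles as the "is NULL"
 * flag -- followed by the data itself at dtsv_data + sizeof (uint64_t);
 * the store side of this convention is in DIF_OP_STGS, below.)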
4586 */ 4587 regs[rd] = NULL; 4588 } else { 4589 regs[rd] = a + sizeof (uint64_t); 4590 } 4591 4592 break; 4593 } 4594 4595 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4596 break; 4597 4598 case DIF_OP_STGS: 4599 id = DIF_INSTR_VAR(instr); 4600 4601 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4602 id -= DIF_VAR_OTHER_UBASE; 4603 4604 svar = vstate->dtvs_globals[id]; 4605 ASSERT(svar != NULL); 4606 v = &svar->dtsv_var; 4607 4608 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4609 uintptr_t a = (uintptr_t)svar->dtsv_data; 4610 4611 ASSERT(a != NULL); 4612 ASSERT(svar->dtsv_size != 0); 4613 4614 if (regs[rd] == NULL) { 4615 *(uint8_t *)a = UINT8_MAX; 4616 break; 4617 } else { 4618 *(uint8_t *)a = 0; 4619 a += sizeof (uint64_t); 4620 } 4621 if (!dtrace_vcanload( 4622 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4623 mstate, vstate)) 4624 break; 4625 4626 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4627 (void *)a, &v->dtdv_type); 4628 break; 4629 } 4630 4631 svar->dtsv_data = regs[rd]; 4632 break; 4633 4634 case DIF_OP_LDTA: 4635 /* 4636 * There are no DTrace built-in thread-local arrays at 4637 * present. This opcode is saved for future work. 4638 */ 4639 *flags |= CPU_DTRACE_ILLOP; 4640 regs[rd] = 0; 4641 break; 4642 4643 case DIF_OP_LDLS: 4644 id = DIF_INSTR_VAR(instr); 4645 4646 if (id < DIF_VAR_OTHER_UBASE) { 4647 /* 4648 * For now, this has no meaning. 4649 */ 4650 regs[rd] = 0; 4651 break; 4652 } 4653 4654 id -= DIF_VAR_OTHER_UBASE; 4655 4656 ASSERT(id < vstate->dtvs_nlocals); 4657 ASSERT(vstate->dtvs_locals != NULL); 4658 4659 svar = vstate->dtvs_locals[id]; 4660 ASSERT(svar != NULL); 4661 v = &svar->dtsv_var; 4662 4663 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4664 uintptr_t a = (uintptr_t)svar->dtsv_data; 4665 size_t sz = v->dtdv_type.dtdt_size; 4666 4667 sz += sizeof (uint64_t); 4668 ASSERT(svar->dtsv_size == NCPU * sz); 4669 a += CPU->cpu_id * sz; 4670 4671 if (*(uint8_t *)a == UINT8_MAX) { 4672 /* 4673 * If the 0th byte is set to UINT8_MAX 4674 * then this is to be treated as a 4675 * reference to a NULL variable. 
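 *
 * (These by-reference locals follow the same layout as the globals
 * handled in DIF_OP_LDGS, except that the backing store holds NCPU
 * copies -- each of size dtdt_size plus the uint64_t-sized header --
 * with the copy for the current CPU selected by CPU->cpu_id.)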
4676 */ 4677 regs[rd] = NULL; 4678 } else { 4679 regs[rd] = a + sizeof (uint64_t); 4680 } 4681 4682 break; 4683 } 4684 4685 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4686 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4687 regs[rd] = tmp[CPU->cpu_id]; 4688 break; 4689 4690 case DIF_OP_STLS: 4691 id = DIF_INSTR_VAR(instr); 4692 4693 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4694 id -= DIF_VAR_OTHER_UBASE; 4695 ASSERT(id < vstate->dtvs_nlocals); 4696 4697 ASSERT(vstate->dtvs_locals != NULL); 4698 svar = vstate->dtvs_locals[id]; 4699 ASSERT(svar != NULL); 4700 v = &svar->dtsv_var; 4701 4702 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4703 uintptr_t a = (uintptr_t)svar->dtsv_data; 4704 size_t sz = v->dtdv_type.dtdt_size; 4705 4706 sz += sizeof (uint64_t); 4707 ASSERT(svar->dtsv_size == NCPU * sz); 4708 a += CPU->cpu_id * sz; 4709 4710 if (regs[rd] == NULL) { 4711 *(uint8_t *)a = UINT8_MAX; 4712 break; 4713 } else { 4714 *(uint8_t *)a = 0; 4715 a += sizeof (uint64_t); 4716 } 4717 4718 if (!dtrace_vcanload( 4719 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4720 mstate, vstate)) 4721 break; 4722 4723 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4724 (void *)a, &v->dtdv_type); 4725 break; 4726 } 4727 4728 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4729 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4730 tmp[CPU->cpu_id] = regs[rd]; 4731 break; 4732 4733 case DIF_OP_LDTS: { 4734 dtrace_dynvar_t *dvar; 4735 dtrace_key_t *key; 4736 4737 id = DIF_INSTR_VAR(instr); 4738 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4739 id -= DIF_VAR_OTHER_UBASE; 4740 v = &vstate->dtvs_tlocals[id]; 4741 4742 key = &tupregs[DIF_DTR_NREGS]; 4743 key[0].dttk_value = (uint64_t)id; 4744 key[0].dttk_size = 0; 4745 DTRACE_TLS_THRKEY(key[1].dttk_value); 4746 key[1].dttk_size = 0; 4747 4748 dvar = dtrace_dynvar(dstate, 2, key, 4749 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 4750 mstate, vstate); 4751 4752 if (dvar == NULL) { 4753 regs[rd] = 0; 4754 break; 4755 } 4756 4757 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4758 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4759 } else { 4760 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4761 } 4762 4763 break; 4764 } 4765 4766 case DIF_OP_STTS: { 4767 dtrace_dynvar_t *dvar; 4768 dtrace_key_t *key; 4769 4770 id = DIF_INSTR_VAR(instr); 4771 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4772 id -= DIF_VAR_OTHER_UBASE; 4773 4774 key = &tupregs[DIF_DTR_NREGS]; 4775 key[0].dttk_value = (uint64_t)id; 4776 key[0].dttk_size = 0; 4777 DTRACE_TLS_THRKEY(key[1].dttk_value); 4778 key[1].dttk_size = 0; 4779 v = &vstate->dtvs_tlocals[id]; 4780 4781 dvar = dtrace_dynvar(dstate, 2, key, 4782 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4783 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4784 regs[rd] ? DTRACE_DYNVAR_ALLOC : 4785 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4786 4787 /* 4788 * Given that we're storing to thread-local data, 4789 * we need to flush our predicate cache. 
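 *
 * (The per-thread predicate cache records that a particular cached
 * predicate has already evaluated to false for this thread; because
 * such a predicate may depend on thread-local variables, any store to
 * one must invalidate the cache lest a stale "false" result suppress
 * an enabling that should now fire.)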
4790 */ 4791 curthread->t_predcache = NULL; 4792 4793 if (dvar == NULL) 4794 break; 4795 4796 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4797 if (!dtrace_vcanload( 4798 (void *)(uintptr_t)regs[rd], 4799 &v->dtdv_type, mstate, vstate)) 4800 break; 4801 4802 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4803 dvar->dtdv_data, &v->dtdv_type); 4804 } else { 4805 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4806 } 4807 4808 break; 4809 } 4810 4811 case DIF_OP_SRA: 4812 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 4813 break; 4814 4815 case DIF_OP_CALL: 4816 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 4817 regs, tupregs, ttop, mstate, state); 4818 break; 4819 4820 case DIF_OP_PUSHTR: 4821 if (ttop == DIF_DTR_NREGS) { 4822 *flags |= CPU_DTRACE_TUPOFLOW; 4823 break; 4824 } 4825 4826 if (r1 == DIF_TYPE_STRING) { 4827 /* 4828 * If this is a string type and the size is 0, 4829 * we'll use the system-wide default string 4830 * size. Note that we are _not_ looking at 4831 * the value of the DTRACEOPT_STRSIZE option; 4832 * had this been set, we would expect to have 4833 * a non-zero size value in the "pushtr". 4834 */ 4835 tupregs[ttop].dttk_size = 4836 dtrace_strlen((char *)(uintptr_t)regs[rd], 4837 regs[r2] ? regs[r2] : 4838 dtrace_strsize_default) + 1; 4839 } else { 4840 tupregs[ttop].dttk_size = regs[r2]; 4841 } 4842 4843 tupregs[ttop++].dttk_value = regs[rd]; 4844 break; 4845 4846 case DIF_OP_PUSHTV: 4847 if (ttop == DIF_DTR_NREGS) { 4848 *flags |= CPU_DTRACE_TUPOFLOW; 4849 break; 4850 } 4851 4852 tupregs[ttop].dttk_value = regs[rd]; 4853 tupregs[ttop++].dttk_size = 0; 4854 break; 4855 4856 case DIF_OP_POPTS: 4857 if (ttop != 0) 4858 ttop--; 4859 break; 4860 4861 case DIF_OP_FLUSHTS: 4862 ttop = 0; 4863 break; 4864 4865 case DIF_OP_LDGAA: 4866 case DIF_OP_LDTAA: { 4867 dtrace_dynvar_t *dvar; 4868 dtrace_key_t *key = tupregs; 4869 uint_t nkeys = ttop; 4870 4871 id = DIF_INSTR_VAR(instr); 4872 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4873 id -= DIF_VAR_OTHER_UBASE; 4874 4875 key[nkeys].dttk_value = (uint64_t)id; 4876 key[nkeys++].dttk_size = 0; 4877 4878 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4879 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4880 key[nkeys++].dttk_size = 0; 4881 v = &vstate->dtvs_tlocals[id]; 4882 } else { 4883 v = &vstate->dtvs_globals[id]->dtsv_var; 4884 } 4885 4886 dvar = dtrace_dynvar(dstate, nkeys, key, 4887 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4888 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4889 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 4890 4891 if (dvar == NULL) { 4892 regs[rd] = 0; 4893 break; 4894 } 4895 4896 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4897 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4898 } else { 4899 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4900 } 4901 4902 break; 4903 } 4904 4905 case DIF_OP_STGAA: 4906 case DIF_OP_STTAA: { 4907 dtrace_dynvar_t *dvar; 4908 dtrace_key_t *key = tupregs; 4909 uint_t nkeys = ttop; 4910 4911 id = DIF_INSTR_VAR(instr); 4912 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4913 id -= DIF_VAR_OTHER_UBASE; 4914 4915 key[nkeys].dttk_value = (uint64_t)id; 4916 key[nkeys++].dttk_size = 0; 4917 4918 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4919 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4920 key[nkeys++].dttk_size = 0; 4921 v = &vstate->dtvs_tlocals[id]; 4922 } else { 4923 v = &vstate->dtvs_globals[id]->dtsv_var; 4924 } 4925 4926 dvar = dtrace_dynvar(dstate, nkeys, key, 4927 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4928 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4929 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4930 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4931 4932 if (dvar == NULL) 4933 break; 4934 4935 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4936 if (!dtrace_vcanload( 4937 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4938 mstate, vstate)) 4939 break; 4940 4941 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4942 dvar->dtdv_data, &v->dtdv_type); 4943 } else { 4944 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4945 } 4946 4947 break; 4948 } 4949 4950 case DIF_OP_ALLOCS: { 4951 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4952 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4953 4954 /* 4955 * Rounding up the user allocation size could have 4956 * overflowed large, bogus allocations (like -1ULL) to 4957 * 0. 4958 */ 4959 if (size < regs[r1] || 4960 !DTRACE_INSCRATCH(mstate, size)) { 4961 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4962 regs[rd] = NULL; 4963 break; 4964 } 4965 4966 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 4967 mstate->dtms_scratch_ptr += size; 4968 regs[rd] = ptr; 4969 break; 4970 } 4971 4972 case DIF_OP_COPYS: 4973 if (!dtrace_canstore(regs[rd], regs[r2], 4974 mstate, vstate)) { 4975 *flags |= CPU_DTRACE_BADADDR; 4976 *illval = regs[rd]; 4977 break; 4978 } 4979 4980 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 4981 break; 4982 4983 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4984 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4985 break; 4986 4987 case DIF_OP_STB: 4988 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4989 *flags |= CPU_DTRACE_BADADDR; 4990 *illval = regs[rd]; 4991 break; 4992 } 4993 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4994 break; 4995 4996 case DIF_OP_STH: 4997 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4998 *flags |= CPU_DTRACE_BADADDR; 4999 *illval = regs[rd]; 5000 break; 5001 } 5002 if (regs[rd] & 1) { 5003 *flags |= CPU_DTRACE_BADALIGN; 5004 *illval = regs[rd]; 5005 break; 5006 } 5007 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5008 break; 5009 5010 case DIF_OP_STW: 5011 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5012 *flags |= CPU_DTRACE_BADADDR; 5013 *illval = regs[rd]; 5014 break; 5015 } 5016 if (regs[rd] & 3) { 5017 *flags |= CPU_DTRACE_BADALIGN; 5018 *illval = regs[rd]; 5019 break; 5020 } 5021 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5022 break; 5023 5024 case DIF_OP_STX: 5025 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5026 *flags |= CPU_DTRACE_BADADDR; 5027 *illval = regs[rd]; 5028 break; 5029 } 5030 if (regs[rd] & 7) { 5031 *flags |= CPU_DTRACE_BADALIGN; 5032 *illval = regs[rd]; 5033 break; 5034 } 5035 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5036 break; 5037 } 5038 } 5039 5040 if (!(*flags & CPU_DTRACE_FAULT)) 5041 return (rval); 5042 5043 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5044 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5045 5046 return (0); 5047 } 5048 5049 static void 5050 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5051 { 5052 dtrace_probe_t *probe = ecb->dte_probe; 5053 dtrace_provider_t *prov = probe->dtpr_provider; 5054 char c[DTRACE_FULLNAMELEN + 80], *str; 5055 char *msg = "dtrace: breakpoint action at probe "; 5056 char *ecbmsg = " (ecb "; 5057 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5058 uintptr_t val = (uintptr_t)ecb; 5059 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5060 5061 if (dtrace_destructive_disallow) 5062 return; 5063 5064 /* 5065 * It's impossible to be taking action on the NULL probe. 
5066 */ 5067 ASSERT(probe != NULL); 5068 5069 /* 5070 * This is a poor man's (destitute man's?) sprintf(): we want to 5071 * print the provider name, module name, function name and name of 5072 * the probe, along with the hex address of the ECB with the breakpoint 5073 * action -- all of which we must place in the character buffer by 5074 * hand. 5075 */ 5076 while (*msg != '\0') 5077 c[i++] = *msg++; 5078 5079 for (str = prov->dtpv_name; *str != '\0'; str++) 5080 c[i++] = *str; 5081 c[i++] = ':'; 5082 5083 for (str = probe->dtpr_mod; *str != '\0'; str++) 5084 c[i++] = *str; 5085 c[i++] = ':'; 5086 5087 for (str = probe->dtpr_func; *str != '\0'; str++) 5088 c[i++] = *str; 5089 c[i++] = ':'; 5090 5091 for (str = probe->dtpr_name; *str != '\0'; str++) 5092 c[i++] = *str; 5093 5094 while (*ecbmsg != '\0') 5095 c[i++] = *ecbmsg++; 5096 5097 while (shift >= 0) { 5098 mask = (uintptr_t)0xf << shift; 5099 5100 if (val >= ((uintptr_t)1 << shift)) 5101 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5102 shift -= 4; 5103 } 5104 5105 c[i++] = ')'; 5106 c[i] = '\0'; 5107 5108 debug_enter(c); 5109 } 5110 5111 static void 5112 dtrace_action_panic(dtrace_ecb_t *ecb) 5113 { 5114 dtrace_probe_t *probe = ecb->dte_probe; 5115 5116 /* 5117 * It's impossible to be taking action on the NULL probe. 5118 */ 5119 ASSERT(probe != NULL); 5120 5121 if (dtrace_destructive_disallow) 5122 return; 5123 5124 if (dtrace_panicked != NULL) 5125 return; 5126 5127 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5128 return; 5129 5130 /* 5131 * We won the right to panic. (We want to be sure that only one 5132 * thread calls panic() from dtrace_probe(), and that panic() is 5133 * called exactly once.) 5134 */ 5135 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5136 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5137 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5138 } 5139 5140 static void 5141 dtrace_action_raise(uint64_t sig) 5142 { 5143 if (dtrace_destructive_disallow) 5144 return; 5145 5146 if (sig >= NSIG) { 5147 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5148 return; 5149 } 5150 5151 /* 5152 * raise() has a queue depth of 1 -- we ignore all subsequent 5153 * invocations of the raise() action. 5154 */ 5155 if (curthread->t_dtrace_sig == 0) 5156 curthread->t_dtrace_sig = (uint8_t)sig; 5157 5158 curthread->t_sig_check = 1; 5159 aston(curthread); 5160 } 5161 5162 static void 5163 dtrace_action_stop(void) 5164 { 5165 if (dtrace_destructive_disallow) 5166 return; 5167 5168 if (!curthread->t_dtrace_stop) { 5169 curthread->t_dtrace_stop = 1; 5170 curthread->t_sig_check = 1; 5171 aston(curthread); 5172 } 5173 } 5174 5175 static void 5176 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5177 { 5178 hrtime_t now; 5179 volatile uint16_t *flags; 5180 cpu_t *cpu = CPU; 5181 5182 if (dtrace_destructive_disallow) 5183 return; 5184 5185 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5186 5187 now = dtrace_gethrtime(); 5188 5189 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5190 /* 5191 * We need to advance the mark to the current time. 5192 */ 5193 cpu->cpu_dtrace_chillmark = now; 5194 cpu->cpu_dtrace_chilled = 0; 5195 } 5196 5197 /* 5198 * Now check to see if the requested chill time would take us over 5199 * the maximum amount of time allowed in the chill interval. (Or 5200 * worse, if the calculation itself induces overflow.) 
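 *
 * (The second comparison below is the overflow check: a pathological
 * request -- chill() with an enormous value, say -- could wrap the
 * sum around, so a total that comes out smaller than the running
 * total is rejected as illegal as well.)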
5201 */ 5202 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5203 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5204 *flags |= CPU_DTRACE_ILLOP; 5205 return; 5206 } 5207 5208 while (dtrace_gethrtime() - now < val) 5209 continue; 5210 5211 /* 5212 * Normally, we assure that the value of the variable "timestamp" does 5213 * not change within an ECB. The presence of chill() represents an 5214 * exception to this rule, however. 5215 */ 5216 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5217 cpu->cpu_dtrace_chilled += val; 5218 } 5219 5220 static void 5221 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5222 uint64_t *buf, uint64_t arg) 5223 { 5224 int nframes = DTRACE_USTACK_NFRAMES(arg); 5225 int strsize = DTRACE_USTACK_STRSIZE(arg); 5226 uint64_t *pcs = &buf[1], *fps; 5227 char *str = (char *)&pcs[nframes]; 5228 int size, offs = 0, i, j; 5229 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5230 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5231 char *sym; 5232 5233 /* 5234 * Should be taking a faster path if string space has not been 5235 * allocated. 5236 */ 5237 ASSERT(strsize != 0); 5238 5239 /* 5240 * We will first allocate some temporary space for the frame pointers. 5241 */ 5242 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5243 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5244 (nframes * sizeof (uint64_t)); 5245 5246 if (!DTRACE_INSCRATCH(mstate, size)) { 5247 /* 5248 * Not enough room for our frame pointers -- need to indicate 5249 * that we ran out of scratch space. 5250 */ 5251 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5252 return; 5253 } 5254 5255 mstate->dtms_scratch_ptr += size; 5256 saved = mstate->dtms_scratch_ptr; 5257 5258 /* 5259 * Now get a stack with both program counters and frame pointers. 5260 */ 5261 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5262 dtrace_getufpstack(buf, fps, nframes + 1); 5263 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5264 5265 /* 5266 * If that faulted, we're cooked. 5267 */ 5268 if (*flags & CPU_DTRACE_FAULT) 5269 goto out; 5270 5271 /* 5272 * Now we want to walk up the stack, calling the USTACK helper. For 5273 * each iteration, we restore the scratch pointer. 5274 */ 5275 for (i = 0; i < nframes; i++) { 5276 mstate->dtms_scratch_ptr = saved; 5277 5278 if (offs >= strsize) 5279 break; 5280 5281 sym = (char *)(uintptr_t)dtrace_helper( 5282 DTRACE_HELPER_ACTION_USTACK, 5283 mstate, state, pcs[i], fps[i]); 5284 5285 /* 5286 * If we faulted while running the helper, we're going to 5287 * clear the fault and null out the corresponding string. 5288 */ 5289 if (*flags & CPU_DTRACE_FAULT) { 5290 *flags &= ~CPU_DTRACE_FAULT; 5291 str[offs++] = '\0'; 5292 continue; 5293 } 5294 5295 if (sym == NULL) { 5296 str[offs++] = '\0'; 5297 continue; 5298 } 5299 5300 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5301 5302 /* 5303 * Now copy in the string that the helper returned to us. 5304 */ 5305 for (j = 0; offs + j < strsize; j++) { 5306 if ((str[offs + j] = sym[j]) == '\0') 5307 break; 5308 } 5309 5310 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5311 5312 offs += j + 1; 5313 } 5314 5315 if (offs >= strsize) { 5316 /* 5317 * If we didn't have room for all of the strings, we don't 5318 * abort processing -- this needn't be a fatal error -- but we 5319 * still want to increment a counter (dts_stkstroverflows) to 5320 * allow this condition to be warned about. (If this is from 5321 * a jstack() action, it is easily tuned via jstackstrsize.) 
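 *
 * (For example, a consumer could raise the limit with an invocation
 * along the lines of "dtrace -x jstackstrsize=1024 -n
 * 'syscall::read:entry { jstack(); }'" -- an illustrative command
 * line, not a prescription.)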
5322 */ 5323 dtrace_error(&state->dts_stkstroverflows); 5324 } 5325 5326 while (offs < strsize) 5327 str[offs++] = '\0'; 5328 5329 out: 5330 mstate->dtms_scratch_ptr = old; 5331 } 5332 5333 /* 5334 * If you're looking for the epicenter of DTrace, you just found it. This 5335 * is the function called by the provider to fire a probe -- from which all 5336 * subsequent probe-context DTrace activity emanates. 5337 */ 5338 void 5339 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5340 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5341 { 5342 processorid_t cpuid; 5343 dtrace_icookie_t cookie; 5344 dtrace_probe_t *probe; 5345 dtrace_mstate_t mstate; 5346 dtrace_ecb_t *ecb; 5347 dtrace_action_t *act; 5348 intptr_t offs; 5349 size_t size; 5350 int vtime, onintr; 5351 volatile uint16_t *flags; 5352 hrtime_t now; 5353 5354 /* 5355 * Kick out immediately if this CPU is still being born (in which case 5356 * curthread will be set to -1) or the current thread can't allow 5357 * probes in its current context. 5358 */ 5359 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5360 return; 5361 5362 cookie = dtrace_interrupt_disable(); 5363 probe = dtrace_probes[id - 1]; 5364 cpuid = CPU->cpu_id; 5365 onintr = CPU_ON_INTR(CPU); 5366 5367 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5368 probe->dtpr_predcache == curthread->t_predcache) { 5369 /* 5370 * We have hit in the predicate cache; we know that 5371 * this predicate would evaluate to be false. 5372 */ 5373 dtrace_interrupt_enable(cookie); 5374 return; 5375 } 5376 5377 if (panic_quiesce) { 5378 /* 5379 * We don't trace anything if we're panicking. 5380 */ 5381 dtrace_interrupt_enable(cookie); 5382 return; 5383 } 5384 5385 now = dtrace_gethrtime(); 5386 vtime = dtrace_vtime_references != 0; 5387 5388 if (vtime && curthread->t_dtrace_start) 5389 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5390 5391 mstate.dtms_difo = NULL; 5392 mstate.dtms_probe = probe; 5393 mstate.dtms_strtok = NULL; 5394 mstate.dtms_arg[0] = arg0; 5395 mstate.dtms_arg[1] = arg1; 5396 mstate.dtms_arg[2] = arg2; 5397 mstate.dtms_arg[3] = arg3; 5398 mstate.dtms_arg[4] = arg4; 5399 5400 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5401 5402 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5403 dtrace_predicate_t *pred = ecb->dte_predicate; 5404 dtrace_state_t *state = ecb->dte_state; 5405 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5406 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5407 dtrace_vstate_t *vstate = &state->dts_vstate; 5408 dtrace_provider_t *prov = probe->dtpr_provider; 5409 int committed = 0; 5410 caddr_t tomax; 5411 5412 /* 5413 * A little subtlety with the following (seemingly innocuous) 5414 * declaration of the automatic 'val': by looking at the 5415 * code, you might think that it could be declared in the 5416 * action processing loop, below. (That is, it's only used in 5417 * the action processing loop.) However, it must be declared 5418 * out of that scope because in the case of DIF expression 5419 * arguments to aggregating actions, one iteration of the 5420 * action loop will use the last iteration's value. 
5421 */ 5422 #ifdef lint 5423 uint64_t val = 0; 5424 #else 5425 uint64_t val; 5426 #endif 5427 5428 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5429 *flags &= ~CPU_DTRACE_ERROR; 5430 5431 if (prov == dtrace_provider) { 5432 /* 5433 * If dtrace itself is the provider of this probe, 5434 * we're only going to continue processing the ECB if 5435 * arg0 (the dtrace_state_t) is equal to the ECB's 5436 * creating state. (This prevents disjoint consumers 5437 * from seeing one another's metaprobes.) 5438 */ 5439 if (arg0 != (uint64_t)(uintptr_t)state) 5440 continue; 5441 } 5442 5443 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5444 /* 5445 * We're not currently active. If our provider isn't 5446 * the dtrace pseudo provider, we're not interested. 5447 */ 5448 if (prov != dtrace_provider) 5449 continue; 5450 5451 /* 5452 * Now we must further check if we are in the BEGIN 5453 * probe. If we are, we will only continue processing 5454 * if we're still in WARMUP -- if one BEGIN enabling 5455 * has invoked the exit() action, we don't want to 5456 * evaluate subsequent BEGIN enablings. 5457 */ 5458 if (probe->dtpr_id == dtrace_probeid_begin && 5459 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5460 ASSERT(state->dts_activity == 5461 DTRACE_ACTIVITY_DRAINING); 5462 continue; 5463 } 5464 } 5465 5466 if (ecb->dte_cond) { 5467 /* 5468 * If the dte_cond bits indicate that this 5469 * consumer is only allowed to see user-mode firings 5470 * of this probe, call the provider's dtps_usermode() 5471 * entry point to check that the probe was fired 5472 * while in a user context. Skip this ECB if that's 5473 * not the case. 5474 */ 5475 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5476 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5477 probe->dtpr_id, probe->dtpr_arg) == 0) 5478 continue; 5479 5480 /* 5481 * This is more subtle than it looks. We have to be 5482 * absolutely certain that CRED() isn't going to 5483 * change out from under us so it's only legit to 5484 * examine that structure if we're in constrained 5485 * situations. Currently, the only times we'll this 5486 * check is if a non-super-user has enabled the 5487 * profile or syscall providers -- providers that 5488 * allow visibility of all processes. For the 5489 * profile case, the check above will ensure that 5490 * we're examining a user context. 5491 */ 5492 if (ecb->dte_cond & DTRACE_COND_OWNER) { 5493 cred_t *cr; 5494 cred_t *s_cr = 5495 ecb->dte_state->dts_cred.dcr_cred; 5496 proc_t *proc; 5497 5498 ASSERT(s_cr != NULL); 5499 5500 if ((cr = CRED()) == NULL || 5501 s_cr->cr_uid != cr->cr_uid || 5502 s_cr->cr_uid != cr->cr_ruid || 5503 s_cr->cr_uid != cr->cr_suid || 5504 s_cr->cr_gid != cr->cr_gid || 5505 s_cr->cr_gid != cr->cr_rgid || 5506 s_cr->cr_gid != cr->cr_sgid || 5507 (proc = ttoproc(curthread)) == NULL || 5508 (proc->p_flag & SNOCD)) 5509 continue; 5510 } 5511 5512 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 5513 cred_t *cr; 5514 cred_t *s_cr = 5515 ecb->dte_state->dts_cred.dcr_cred; 5516 5517 ASSERT(s_cr != NULL); 5518 5519 if ((cr = CRED()) == NULL || 5520 s_cr->cr_zone->zone_id != 5521 cr->cr_zone->zone_id) 5522 continue; 5523 } 5524 } 5525 5526 if (now - state->dts_alive > dtrace_deadman_timeout) { 5527 /* 5528 * We seem to be dead. 
Unless we (a) have kernel 5529 * destructive permissions (b) have expicitly enabled 5530 * destructive actions and (c) destructive actions have 5531 * not been disabled, we're going to transition into 5532 * the KILLED state, from which no further processing 5533 * on this state will be performed. 5534 */ 5535 if (!dtrace_priv_kernel_destructive(state) || 5536 !state->dts_cred.dcr_destructive || 5537 dtrace_destructive_disallow) { 5538 void *activity = &state->dts_activity; 5539 dtrace_activity_t current; 5540 5541 do { 5542 current = state->dts_activity; 5543 } while (dtrace_cas32(activity, current, 5544 DTRACE_ACTIVITY_KILLED) != current); 5545 5546 continue; 5547 } 5548 } 5549 5550 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 5551 ecb->dte_alignment, state, &mstate)) < 0) 5552 continue; 5553 5554 tomax = buf->dtb_tomax; 5555 ASSERT(tomax != NULL); 5556 5557 if (ecb->dte_size != 0) 5558 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5559 5560 mstate.dtms_epid = ecb->dte_epid; 5561 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5562 5563 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 5564 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 5565 else 5566 mstate.dtms_access = 0; 5567 5568 if (pred != NULL) { 5569 dtrace_difo_t *dp = pred->dtp_difo; 5570 int rval; 5571 5572 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5573 5574 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5575 dtrace_cacheid_t cid = probe->dtpr_predcache; 5576 5577 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5578 /* 5579 * Update the predicate cache... 5580 */ 5581 ASSERT(cid == pred->dtp_cacheid); 5582 curthread->t_predcache = cid; 5583 } 5584 5585 continue; 5586 } 5587 } 5588 5589 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5590 act != NULL; act = act->dta_next) { 5591 size_t valoffs; 5592 dtrace_difo_t *dp; 5593 dtrace_recdesc_t *rec = &act->dta_rec; 5594 5595 size = rec->dtrd_size; 5596 valoffs = offs + rec->dtrd_offset; 5597 5598 if (DTRACEACT_ISAGG(act->dta_kind)) { 5599 uint64_t v = 0xbad; 5600 dtrace_aggregation_t *agg; 5601 5602 agg = (dtrace_aggregation_t *)act; 5603 5604 if ((dp = act->dta_difo) != NULL) 5605 v = dtrace_dif_emulate(dp, 5606 &mstate, vstate, state); 5607 5608 if (*flags & CPU_DTRACE_ERROR) 5609 continue; 5610 5611 /* 5612 * Note that we always pass the expression 5613 * value from the previous iteration of the 5614 * action loop. This value will only be used 5615 * if there is an expression argument to the 5616 * aggregating action, denoted by the 5617 * dtag_hasarg field. 5618 */ 5619 dtrace_aggregate(agg, buf, 5620 offs, aggbuf, v, val); 5621 continue; 5622 } 5623 5624 switch (act->dta_kind) { 5625 case DTRACEACT_STOP: 5626 if (dtrace_priv_proc_destructive(state)) 5627 dtrace_action_stop(); 5628 continue; 5629 5630 case DTRACEACT_BREAKPOINT: 5631 if (dtrace_priv_kernel_destructive(state)) 5632 dtrace_action_breakpoint(ecb); 5633 continue; 5634 5635 case DTRACEACT_PANIC: 5636 if (dtrace_priv_kernel_destructive(state)) 5637 dtrace_action_panic(ecb); 5638 continue; 5639 5640 case DTRACEACT_STACK: 5641 if (!dtrace_priv_kernel(state)) 5642 continue; 5643 5644 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5645 size / sizeof (pc_t), probe->dtpr_aframes, 5646 DTRACE_ANCHORED(probe) ? NULL : 5647 (uint32_t *)arg0); 5648 5649 continue; 5650 5651 case DTRACEACT_JSTACK: 5652 case DTRACEACT_USTACK: 5653 if (!dtrace_priv_proc(state)) 5654 continue; 5655 5656 /* 5657 * See comment in DIF_VAR_PID. 
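 *
 * (Briefly: when an anchored probe fires at interrupt level, the
 * interrupted user context belongs to whatever thread happened to be
 * running and bears no relation to the probe itself, so rather than
 * record some arbitrary process's stack we zero-fill the ustack
 * record below.)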
5658 */ 5659 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5660 CPU_ON_INTR(CPU)) { 5661 int depth = DTRACE_USTACK_NFRAMES( 5662 rec->dtrd_arg) + 1; 5663 5664 dtrace_bzero((void *)(tomax + valoffs), 5665 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5666 + depth * sizeof (uint64_t)); 5667 5668 continue; 5669 } 5670 5671 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5672 curproc->p_dtrace_helpers != NULL) { 5673 /* 5674 * This is the slow path -- we have 5675 * allocated string space, and we're 5676 * getting the stack of a process that 5677 * has helpers. Call into a separate 5678 * routine to perform this processing. 5679 */ 5680 dtrace_action_ustack(&mstate, state, 5681 (uint64_t *)(tomax + valoffs), 5682 rec->dtrd_arg); 5683 continue; 5684 } 5685 5686 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5687 dtrace_getupcstack((uint64_t *) 5688 (tomax + valoffs), 5689 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5690 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5691 continue; 5692 5693 default: 5694 break; 5695 } 5696 5697 dp = act->dta_difo; 5698 ASSERT(dp != NULL); 5699 5700 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5701 5702 if (*flags & CPU_DTRACE_ERROR) 5703 continue; 5704 5705 switch (act->dta_kind) { 5706 case DTRACEACT_SPECULATE: 5707 ASSERT(buf == &state->dts_buffer[cpuid]); 5708 buf = dtrace_speculation_buffer(state, 5709 cpuid, val); 5710 5711 if (buf == NULL) { 5712 *flags |= CPU_DTRACE_DROP; 5713 continue; 5714 } 5715 5716 offs = dtrace_buffer_reserve(buf, 5717 ecb->dte_needed, ecb->dte_alignment, 5718 state, NULL); 5719 5720 if (offs < 0) { 5721 *flags |= CPU_DTRACE_DROP; 5722 continue; 5723 } 5724 5725 tomax = buf->dtb_tomax; 5726 ASSERT(tomax != NULL); 5727 5728 if (ecb->dte_size != 0) 5729 DTRACE_STORE(uint32_t, tomax, offs, 5730 ecb->dte_epid); 5731 continue; 5732 5733 case DTRACEACT_CHILL: 5734 if (dtrace_priv_kernel_destructive(state)) 5735 dtrace_action_chill(&mstate, val); 5736 continue; 5737 5738 case DTRACEACT_RAISE: 5739 if (dtrace_priv_proc_destructive(state)) 5740 dtrace_action_raise(val); 5741 continue; 5742 5743 case DTRACEACT_COMMIT: 5744 ASSERT(!committed); 5745 5746 /* 5747 * We need to commit our buffer state. 5748 */ 5749 if (ecb->dte_size) 5750 buf->dtb_offset = offs + ecb->dte_size; 5751 buf = &state->dts_buffer[cpuid]; 5752 dtrace_speculation_commit(state, cpuid, val); 5753 committed = 1; 5754 continue; 5755 5756 case DTRACEACT_DISCARD: 5757 dtrace_speculation_discard(state, cpuid, val); 5758 continue; 5759 5760 case DTRACEACT_DIFEXPR: 5761 case DTRACEACT_LIBACT: 5762 case DTRACEACT_PRINTF: 5763 case DTRACEACT_PRINTA: 5764 case DTRACEACT_SYSTEM: 5765 case DTRACEACT_FREOPEN: 5766 break; 5767 5768 case DTRACEACT_SYM: 5769 case DTRACEACT_MOD: 5770 if (!dtrace_priv_kernel(state)) 5771 continue; 5772 break; 5773 5774 case DTRACEACT_USYM: 5775 case DTRACEACT_UMOD: 5776 case DTRACEACT_UADDR: { 5777 struct pid *pid = curthread->t_procp->p_pidp; 5778 5779 if (!dtrace_priv_proc(state)) 5780 continue; 5781 5782 DTRACE_STORE(uint64_t, tomax, 5783 valoffs, (uint64_t)pid->pid_id); 5784 DTRACE_STORE(uint64_t, tomax, 5785 valoffs + sizeof (uint64_t), val); 5786 5787 continue; 5788 } 5789 5790 case DTRACEACT_EXIT: { 5791 /* 5792 * For the exit action, we are going to attempt 5793 * to atomically set our activity to be 5794 * draining. If this fails (either because 5795 * another CPU has beat us to the exit action, 5796 * or because our current activity is something 5797 * other than ACTIVE or WARMUP), we will 5798 * continue. 
This assures that the exit action 5799 * can be successfully recorded at most once 5800 * when we're in the ACTIVE state. If we're 5801 * encountering the exit() action while in 5802 * COOLDOWN, however, we want to honor the new 5803 * status code. (We know that we're the only 5804 * thread in COOLDOWN, so there is no race.) 5805 */ 5806 void *activity = &state->dts_activity; 5807 dtrace_activity_t current = state->dts_activity; 5808 5809 if (current == DTRACE_ACTIVITY_COOLDOWN) 5810 break; 5811 5812 if (current != DTRACE_ACTIVITY_WARMUP) 5813 current = DTRACE_ACTIVITY_ACTIVE; 5814 5815 if (dtrace_cas32(activity, current, 5816 DTRACE_ACTIVITY_DRAINING) != current) { 5817 *flags |= CPU_DTRACE_DROP; 5818 continue; 5819 } 5820 5821 break; 5822 } 5823 5824 default: 5825 ASSERT(0); 5826 } 5827 5828 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 5829 uintptr_t end = valoffs + size; 5830 5831 if (!dtrace_vcanload((void *)(uintptr_t)val, 5832 &dp->dtdo_rtype, &mstate, vstate)) 5833 continue; 5834 5835 /* 5836 * If this is a string, we're going to only 5837 * load until we find the zero byte -- after 5838 * which we'll store zero bytes. 5839 */ 5840 if (dp->dtdo_rtype.dtdt_kind == 5841 DIF_TYPE_STRING) { 5842 char c = '\0' + 1; 5843 int intuple = act->dta_intuple; 5844 size_t s; 5845 5846 for (s = 0; s < size; s++) { 5847 if (c != '\0') 5848 c = dtrace_load8(val++); 5849 5850 DTRACE_STORE(uint8_t, tomax, 5851 valoffs++, c); 5852 5853 if (c == '\0' && intuple) 5854 break; 5855 } 5856 5857 continue; 5858 } 5859 5860 while (valoffs < end) { 5861 DTRACE_STORE(uint8_t, tomax, valoffs++, 5862 dtrace_load8(val++)); 5863 } 5864 5865 continue; 5866 } 5867 5868 switch (size) { 5869 case 0: 5870 break; 5871 5872 case sizeof (uint8_t): 5873 DTRACE_STORE(uint8_t, tomax, valoffs, val); 5874 break; 5875 case sizeof (uint16_t): 5876 DTRACE_STORE(uint16_t, tomax, valoffs, val); 5877 break; 5878 case sizeof (uint32_t): 5879 DTRACE_STORE(uint32_t, tomax, valoffs, val); 5880 break; 5881 case sizeof (uint64_t): 5882 DTRACE_STORE(uint64_t, tomax, valoffs, val); 5883 break; 5884 default: 5885 /* 5886 * Any other size should have been returned by 5887 * reference, not by value. 5888 */ 5889 ASSERT(0); 5890 break; 5891 } 5892 } 5893 5894 if (*flags & CPU_DTRACE_DROP) 5895 continue; 5896 5897 if (*flags & CPU_DTRACE_FAULT) { 5898 int ndx; 5899 dtrace_action_t *err; 5900 5901 buf->dtb_errors++; 5902 5903 if (probe->dtpr_id == dtrace_probeid_error) { 5904 /* 5905 * There's nothing we can do -- we had an 5906 * error on the error probe. We bump an 5907 * error counter to at least indicate that 5908 * this condition happened. 5909 */ 5910 dtrace_error(&state->dts_dblerrors); 5911 continue; 5912 } 5913 5914 if (vtime) { 5915 /* 5916 * Before recursing on dtrace_probe(), we 5917 * need to explicitly clear out our start 5918 * time to prevent it from being accumulated 5919 * into t_dtrace_vtime. 5920 */ 5921 curthread->t_dtrace_start = 0; 5922 } 5923 5924 /* 5925 * Iterate over the actions to figure out which action 5926 * we were processing when we experienced the error. 5927 * Note that act points _past_ the faulting action; if 5928 * act is ecb->dte_action, the fault was in the 5929 * predicate, if it's ecb->dte_action->dta_next it's 5930 * in action #1, and so on. 5931 */ 5932 for (err = ecb->dte_action, ndx = 0; 5933 err != act; err = err->dta_next, ndx++) 5934 continue; 5935 5936 dtrace_probe_error(state, ecb->dte_epid, ndx, 5937 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
5938 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 5939 cpu_core[cpuid].cpuc_dtrace_illval); 5940 5941 continue; 5942 } 5943 5944 if (!committed) 5945 buf->dtb_offset = offs + ecb->dte_size; 5946 } 5947 5948 if (vtime) 5949 curthread->t_dtrace_start = dtrace_gethrtime(); 5950 5951 dtrace_interrupt_enable(cookie); 5952 } 5953 5954 /* 5955 * DTrace Probe Hashing Functions 5956 * 5957 * The functions in this section (and indeed, the functions in remaining 5958 * sections) are not _called_ from probe context. (Any exceptions to this are 5959 * marked with a "Note:".) Rather, they are called from elsewhere in the 5960 * DTrace framework to look-up probes in, add probes to and remove probes from 5961 * the DTrace probe hashes. (Each probe is hashed by each element of the 5962 * probe tuple -- allowing for fast lookups, regardless of what was 5963 * specified.) 5964 */ 5965 static uint_t 5966 dtrace_hash_str(char *p) 5967 { 5968 unsigned int g; 5969 uint_t hval = 0; 5970 5971 while (*p) { 5972 hval = (hval << 4) + *p++; 5973 if ((g = (hval & 0xf0000000)) != 0) 5974 hval ^= g >> 24; 5975 hval &= ~g; 5976 } 5977 return (hval); 5978 } 5979 5980 static dtrace_hash_t * 5981 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5982 { 5983 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5984 5985 hash->dth_stroffs = stroffs; 5986 hash->dth_nextoffs = nextoffs; 5987 hash->dth_prevoffs = prevoffs; 5988 5989 hash->dth_size = 1; 5990 hash->dth_mask = hash->dth_size - 1; 5991 5992 hash->dth_tab = kmem_zalloc(hash->dth_size * 5993 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5994 5995 return (hash); 5996 } 5997 5998 static void 5999 dtrace_hash_destroy(dtrace_hash_t *hash) 6000 { 6001 #ifdef DEBUG 6002 int i; 6003 6004 for (i = 0; i < hash->dth_size; i++) 6005 ASSERT(hash->dth_tab[i] == NULL); 6006 #endif 6007 6008 kmem_free(hash->dth_tab, 6009 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6010 kmem_free(hash, sizeof (dtrace_hash_t)); 6011 } 6012 6013 static void 6014 dtrace_hash_resize(dtrace_hash_t *hash) 6015 { 6016 int size = hash->dth_size, i, ndx; 6017 int new_size = hash->dth_size << 1; 6018 int new_mask = new_size - 1; 6019 dtrace_hashbucket_t **new_tab, *bucket, *next; 6020 6021 ASSERT((new_size & new_mask) == 0); 6022 6023 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6024 6025 for (i = 0; i < size; i++) { 6026 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6027 dtrace_probe_t *probe = bucket->dthb_chain; 6028 6029 ASSERT(probe != NULL); 6030 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6031 6032 next = bucket->dthb_next; 6033 bucket->dthb_next = new_tab[ndx]; 6034 new_tab[ndx] = bucket; 6035 } 6036 } 6037 6038 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6039 hash->dth_tab = new_tab; 6040 hash->dth_size = new_size; 6041 hash->dth_mask = new_mask; 6042 } 6043 6044 static void 6045 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6046 { 6047 int hashval = DTRACE_HASHSTR(hash, new); 6048 int ndx = hashval & hash->dth_mask; 6049 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6050 dtrace_probe_t **nextp, **prevp; 6051 6052 for (; bucket != NULL; bucket = bucket->dthb_next) { 6053 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6054 goto add; 6055 } 6056 6057 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6058 dtrace_hash_resize(hash); 6059 dtrace_hash_add(hash, new); 6060 return; 6061 } 6062 6063 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6064 bucket->dthb_next = 
hash->dth_tab[ndx]; 6065 hash->dth_tab[ndx] = bucket; 6066 hash->dth_nbuckets++; 6067 6068 add: 6069 nextp = DTRACE_HASHNEXT(hash, new); 6070 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6071 *nextp = bucket->dthb_chain; 6072 6073 if (bucket->dthb_chain != NULL) { 6074 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6075 ASSERT(*prevp == NULL); 6076 *prevp = new; 6077 } 6078 6079 bucket->dthb_chain = new; 6080 bucket->dthb_len++; 6081 } 6082 6083 static dtrace_probe_t * 6084 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6085 { 6086 int hashval = DTRACE_HASHSTR(hash, template); 6087 int ndx = hashval & hash->dth_mask; 6088 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6089 6090 for (; bucket != NULL; bucket = bucket->dthb_next) { 6091 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6092 return (bucket->dthb_chain); 6093 } 6094 6095 return (NULL); 6096 } 6097 6098 static int 6099 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6100 { 6101 int hashval = DTRACE_HASHSTR(hash, template); 6102 int ndx = hashval & hash->dth_mask; 6103 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6104 6105 for (; bucket != NULL; bucket = bucket->dthb_next) { 6106 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6107 return (bucket->dthb_len); 6108 } 6109 6110 return (NULL); 6111 } 6112 6113 static void 6114 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6115 { 6116 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6117 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6118 6119 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6120 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6121 6122 /* 6123 * Find the bucket that we're removing this probe from. 6124 */ 6125 for (; bucket != NULL; bucket = bucket->dthb_next) { 6126 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6127 break; 6128 } 6129 6130 ASSERT(bucket != NULL); 6131 6132 if (*prevp == NULL) { 6133 if (*nextp == NULL) { 6134 /* 6135 * The removed probe was the only probe on this 6136 * bucket; we need to remove the bucket. 6137 */ 6138 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6139 6140 ASSERT(bucket->dthb_chain == probe); 6141 ASSERT(b != NULL); 6142 6143 if (b == bucket) { 6144 hash->dth_tab[ndx] = bucket->dthb_next; 6145 } else { 6146 while (b->dthb_next != bucket) 6147 b = b->dthb_next; 6148 b->dthb_next = bucket->dthb_next; 6149 } 6150 6151 ASSERT(hash->dth_nbuckets > 0); 6152 hash->dth_nbuckets--; 6153 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6154 return; 6155 } 6156 6157 bucket->dthb_chain = *nextp; 6158 } else { 6159 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6160 } 6161 6162 if (*nextp != NULL) 6163 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6164 } 6165 6166 /* 6167 * DTrace Utility Functions 6168 * 6169 * These are random utility functions that are _not_ called from probe context. 6170 */ 6171 static int 6172 dtrace_badattr(const dtrace_attribute_t *a) 6173 { 6174 return (a->dtat_name > DTRACE_STABILITY_MAX || 6175 a->dtat_data > DTRACE_STABILITY_MAX || 6176 a->dtat_class > DTRACE_CLASS_MAX); 6177 } 6178 6179 /* 6180 * Return a duplicate copy of a string. If the specified string is NULL, 6181 * this function returns a zero-length string. 6182 */ 6183 static char * 6184 dtrace_strdup(const char *str) 6185 { 6186 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 6187 6188 if (str != NULL) 6189 (void) strcpy(new, str); 6190 6191 return (new); 6192 } 6193 6194 #define DTRACE_ISALPHA(c) \ 6195 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6196 6197 static int 6198 dtrace_badname(const char *s) 6199 { 6200 char c; 6201 6202 if (s == NULL || (c = *s++) == '\0') 6203 return (0); 6204 6205 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6206 return (1); 6207 6208 while ((c = *s++) != '\0') { 6209 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6210 c != '-' && c != '_' && c != '.' && c != '`') 6211 return (1); 6212 } 6213 6214 return (0); 6215 } 6216 6217 static void 6218 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6219 { 6220 uint32_t priv; 6221 6222 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6223 /* 6224 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6225 */ 6226 priv = DTRACE_PRIV_ALL; 6227 } else { 6228 *uidp = crgetuid(cr); 6229 *zoneidp = crgetzoneid(cr); 6230 6231 priv = 0; 6232 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6233 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6234 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6235 priv |= DTRACE_PRIV_USER; 6236 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6237 priv |= DTRACE_PRIV_PROC; 6238 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6239 priv |= DTRACE_PRIV_OWNER; 6240 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6241 priv |= DTRACE_PRIV_ZONEOWNER; 6242 } 6243 6244 *privp = priv; 6245 } 6246 6247 #ifdef DTRACE_ERRDEBUG 6248 static void 6249 dtrace_errdebug(const char *str) 6250 { 6251 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6252 int occupied = 0; 6253 6254 mutex_enter(&dtrace_errlock); 6255 dtrace_errlast = str; 6256 dtrace_errthread = curthread; 6257 6258 while (occupied++ < DTRACE_ERRHASHSZ) { 6259 if (dtrace_errhash[hval].dter_msg == str) { 6260 dtrace_errhash[hval].dter_count++; 6261 goto out; 6262 } 6263 6264 if (dtrace_errhash[hval].dter_msg != NULL) { 6265 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6266 continue; 6267 } 6268 6269 dtrace_errhash[hval].dter_msg = str; 6270 dtrace_errhash[hval].dter_count = 1; 6271 goto out; 6272 } 6273 6274 panic("dtrace: undersized error hash"); 6275 out: 6276 mutex_exit(&dtrace_errlock); 6277 } 6278 #endif 6279 6280 /* 6281 * DTrace Matching Functions 6282 * 6283 * These functions are used to match groups of probes, given some elements of 6284 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6285 */ 6286 static int 6287 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6288 zoneid_t zoneid) 6289 { 6290 if (priv != DTRACE_PRIV_ALL) { 6291 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6292 uint32_t match = priv & ppriv; 6293 6294 /* 6295 * No PRIV_DTRACE_* privileges... 6296 */ 6297 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6298 DTRACE_PRIV_KERNEL)) == 0) 6299 return (0); 6300 6301 /* 6302 * No matching bits, but there were bits to match... 6303 */ 6304 if (match == 0 && ppriv != 0) 6305 return (0); 6306 6307 /* 6308 * Need to have permissions to the process, but don't... 6309 */ 6310 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6311 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6312 return (0); 6313 } 6314 6315 /* 6316 * Need to be in the same zone unless we possess the 6317 * privilege to examine all zones. 
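 * (DTRACE_PRIV_ZONEOWNER is granted to consumers that hold the
 * PRIV_PROC_ZONE privilege; see dtrace_cred2priv(), above.)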
6318 */ 6319 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6320 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6321 return (0); 6322 } 6323 } 6324 6325 return (1); 6326 } 6327 6328 /* 6329 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6330 * consists of input pattern strings and an ops-vector to evaluate them. 6331 * This function returns >0 for match, 0 for no match, and <0 for error. 6332 */ 6333 static int 6334 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6335 uint32_t priv, uid_t uid, zoneid_t zoneid) 6336 { 6337 dtrace_provider_t *pvp = prp->dtpr_provider; 6338 int rv; 6339 6340 if (pvp->dtpv_defunct) 6341 return (0); 6342 6343 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6344 return (rv); 6345 6346 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6347 return (rv); 6348 6349 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6350 return (rv); 6351 6352 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6353 return (rv); 6354 6355 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6356 return (0); 6357 6358 return (rv); 6359 } 6360 6361 /* 6362 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6363 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6364 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6365 * In addition, all of the recursion cases except for '*' matching have been 6366 * unwound. For '*', we still implement recursive evaluation, but a depth 6367 * counter is maintained and matching is aborted if we recurse too deep. 6368 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6369 */ 6370 static int 6371 dtrace_match_glob(const char *s, const char *p, int depth) 6372 { 6373 const char *olds; 6374 char s1, c; 6375 int gs; 6376 6377 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6378 return (-1); 6379 6380 if (s == NULL) 6381 s = ""; /* treat NULL as empty string */ 6382 6383 top: 6384 olds = s; 6385 s1 = *s++; 6386 6387 if (p == NULL) 6388 return (0); 6389 6390 if ((c = *p++) == '\0') 6391 return (s1 == '\0'); 6392 6393 switch (c) { 6394 case '[': { 6395 int ok = 0, notflag = 0; 6396 char lc = '\0'; 6397 6398 if (s1 == '\0') 6399 return (0); 6400 6401 if (*p == '!') { 6402 notflag = 1; 6403 p++; 6404 } 6405 6406 if ((c = *p++) == '\0') 6407 return (0); 6408 6409 do { 6410 if (c == '-' && lc != '\0' && *p != ']') { 6411 if ((c = *p++) == '\0') 6412 return (0); 6413 if (c == '\\' && (c = *p++) == '\0') 6414 return (0); 6415 6416 if (notflag) { 6417 if (s1 < lc || s1 > c) 6418 ok++; 6419 else 6420 return (0); 6421 } else if (lc <= s1 && s1 <= c) 6422 ok++; 6423 6424 } else if (c == '\\' && (c = *p++) == '\0') 6425 return (0); 6426 6427 lc = c; /* save left-hand 'c' for next iteration */ 6428 6429 if (notflag) { 6430 if (s1 != c) 6431 ok++; 6432 else 6433 return (0); 6434 } else if (s1 == c) 6435 ok++; 6436 6437 if ((c = *p++) == '\0') 6438 return (0); 6439 6440 } while (c != ']'); 6441 6442 if (ok) 6443 goto top; 6444 6445 return (0); 6446 } 6447 6448 case '\\': 6449 if ((c = *p++) == '\0') 6450 return (0); 6451 /*FALLTHRU*/ 6452 6453 default: 6454 if (c != s1) 6455 return (0); 6456 /*FALLTHRU*/ 6457 6458 case '?': 6459 if (s1 != '\0') 6460 goto top; 6461 return (0); 6462 6463 case '*': 6464 while (*p == '*') 6465 p++; /* consecutive *'s are identical to a single one */ 6466 6467 if (*p == '\0') 6468 return (1); 6469 6470 for (s = 
olds; *s != '\0'; s++) { 6471 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 6472 return (gs); 6473 } 6474 6475 return (0); 6476 } 6477 } 6478 6479 /*ARGSUSED*/ 6480 static int 6481 dtrace_match_string(const char *s, const char *p, int depth) 6482 { 6483 return (s != NULL && strcmp(s, p) == 0); 6484 } 6485 6486 /*ARGSUSED*/ 6487 static int 6488 dtrace_match_nul(const char *s, const char *p, int depth) 6489 { 6490 return (1); /* always match the empty pattern */ 6491 } 6492 6493 /*ARGSUSED*/ 6494 static int 6495 dtrace_match_nonzero(const char *s, const char *p, int depth) 6496 { 6497 return (s != NULL && s[0] != '\0'); 6498 } 6499 6500 static int 6501 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 6502 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 6503 { 6504 dtrace_probe_t template, *probe; 6505 dtrace_hash_t *hash = NULL; 6506 int len, best = INT_MAX, nmatched = 0; 6507 dtrace_id_t i; 6508 6509 ASSERT(MUTEX_HELD(&dtrace_lock)); 6510 6511 /* 6512 * If the probe ID is specified in the key, just lookup by ID and 6513 * invoke the match callback once if a matching probe is found. 6514 */ 6515 if (pkp->dtpk_id != DTRACE_IDNONE) { 6516 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 6517 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 6518 (void) (*matched)(probe, arg); 6519 nmatched++; 6520 } 6521 return (nmatched); 6522 } 6523 6524 template.dtpr_mod = (char *)pkp->dtpk_mod; 6525 template.dtpr_func = (char *)pkp->dtpk_func; 6526 template.dtpr_name = (char *)pkp->dtpk_name; 6527 6528 /* 6529 * We want to find the most distinct of the module name, function 6530 * name, and name. So for each one that is not a glob pattern or 6531 * empty string, we perform a lookup in the corresponding hash and 6532 * use the hash table with the fewest collisions to do our search. 6533 */ 6534 if (pkp->dtpk_mmatch == &dtrace_match_string && 6535 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 6536 best = len; 6537 hash = dtrace_bymod; 6538 } 6539 6540 if (pkp->dtpk_fmatch == &dtrace_match_string && 6541 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 6542 best = len; 6543 hash = dtrace_byfunc; 6544 } 6545 6546 if (pkp->dtpk_nmatch == &dtrace_match_string && 6547 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 6548 best = len; 6549 hash = dtrace_byname; 6550 } 6551 6552 /* 6553 * If we did not select a hash table, iterate over every probe and 6554 * invoke our callback for each one that matches our input probe key. 6555 */ 6556 if (hash == NULL) { 6557 for (i = 0; i < dtrace_nprobes; i++) { 6558 if ((probe = dtrace_probes[i]) == NULL || 6559 dtrace_match_probe(probe, pkp, priv, uid, 6560 zoneid) <= 0) 6561 continue; 6562 6563 nmatched++; 6564 6565 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6566 break; 6567 } 6568 6569 return (nmatched); 6570 } 6571 6572 /* 6573 * If we selected a hash table, iterate over each probe of the same key 6574 * name and invoke the callback for every probe that matches the other 6575 * attributes of our input probe key. 
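 * (Because we selected the hash with the fewest collisions, this chain is
 * typically short.  For example, for a description like
 * fbt:genunix:kmem_alloc:entry, the function hash chain for 'kmem_alloc'
 * is far shorter than the name hash chain for 'entry', which is shared by
 * every fbt entry probe.)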
6576 */ 6577 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6578 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6579 6580 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6581 continue; 6582 6583 nmatched++; 6584 6585 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6586 break; 6587 } 6588 6589 return (nmatched); 6590 } 6591 6592 /* 6593 * Return the function pointer dtrace_probecmp() should use to compare the 6594 * specified pattern with a string. For NULL or empty patterns, we select 6595 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6596 * For non-empty non-glob strings, we use dtrace_match_string(). 6597 */ 6598 static dtrace_probekey_f * 6599 dtrace_probekey_func(const char *p) 6600 { 6601 char c; 6602 6603 if (p == NULL || *p == '\0') 6604 return (&dtrace_match_nul); 6605 6606 while ((c = *p++) != '\0') { 6607 if (c == '[' || c == '?' || c == '*' || c == '\\') 6608 return (&dtrace_match_glob); 6609 } 6610 6611 return (&dtrace_match_string); 6612 } 6613 6614 /* 6615 * Build a probe comparison key for use with dtrace_match_probe() from the 6616 * given probe description. By convention, a null key only matches anchored 6617 * probes: if each field is the empty string, reset dtpk_fmatch to 6618 * dtrace_match_nonzero(). 6619 */ 6620 static void 6621 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6622 { 6623 pkp->dtpk_prov = pdp->dtpd_provider; 6624 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6625 6626 pkp->dtpk_mod = pdp->dtpd_mod; 6627 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6628 6629 pkp->dtpk_func = pdp->dtpd_func; 6630 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6631 6632 pkp->dtpk_name = pdp->dtpd_name; 6633 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6634 6635 pkp->dtpk_id = pdp->dtpd_id; 6636 6637 if (pkp->dtpk_id == DTRACE_IDNONE && 6638 pkp->dtpk_pmatch == &dtrace_match_nul && 6639 pkp->dtpk_mmatch == &dtrace_match_nul && 6640 pkp->dtpk_fmatch == &dtrace_match_nul && 6641 pkp->dtpk_nmatch == &dtrace_match_nul) 6642 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6643 } 6644 6645 /* 6646 * DTrace Provider-to-Framework API Functions 6647 * 6648 * These functions implement much of the Provider-to-Framework API, as 6649 * described in <sys/dtrace.h>. The parts of the API not in this section are 6650 * the functions in the API for probe management (found below), and 6651 * dtrace_probe() itself (found above). 6652 */ 6653 6654 /* 6655 * Register the calling provider with the DTrace framework. This should 6656 * generally be called by DTrace providers in their attach(9E) entry point. 6657 */ 6658 int 6659 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6660 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6661 { 6662 dtrace_provider_t *provider; 6663 6664 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6665 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6666 "arguments", name ? 
name : "<NULL>"); 6667 return (EINVAL); 6668 } 6669 6670 if (name[0] == '\0' || dtrace_badname(name)) { 6671 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6672 "provider name", name); 6673 return (EINVAL); 6674 } 6675 6676 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6677 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6678 pops->dtps_destroy == NULL || 6679 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6680 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6681 "provider ops", name); 6682 return (EINVAL); 6683 } 6684 6685 if (dtrace_badattr(&pap->dtpa_provider) || 6686 dtrace_badattr(&pap->dtpa_mod) || 6687 dtrace_badattr(&pap->dtpa_func) || 6688 dtrace_badattr(&pap->dtpa_name) || 6689 dtrace_badattr(&pap->dtpa_args)) { 6690 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6691 "provider attributes", name); 6692 return (EINVAL); 6693 } 6694 6695 if (priv & ~DTRACE_PRIV_ALL) { 6696 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6697 "privilege attributes", name); 6698 return (EINVAL); 6699 } 6700 6701 if ((priv & DTRACE_PRIV_KERNEL) && 6702 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 6703 pops->dtps_usermode == NULL) { 6704 cmn_err(CE_WARN, "failed to register provider '%s': need " 6705 "dtps_usermode() op for given privilege attributes", name); 6706 return (EINVAL); 6707 } 6708 6709 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 6710 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6711 (void) strcpy(provider->dtpv_name, name); 6712 6713 provider->dtpv_attr = *pap; 6714 provider->dtpv_priv.dtpp_flags = priv; 6715 if (cr != NULL) { 6716 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 6717 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 6718 } 6719 provider->dtpv_pops = *pops; 6720 6721 if (pops->dtps_provide == NULL) { 6722 ASSERT(pops->dtps_provide_module != NULL); 6723 provider->dtpv_pops.dtps_provide = 6724 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 6725 } 6726 6727 if (pops->dtps_provide_module == NULL) { 6728 ASSERT(pops->dtps_provide != NULL); 6729 provider->dtpv_pops.dtps_provide_module = 6730 (void (*)(void *, struct modctl *))dtrace_nullop; 6731 } 6732 6733 if (pops->dtps_suspend == NULL) { 6734 ASSERT(pops->dtps_resume == NULL); 6735 provider->dtpv_pops.dtps_suspend = 6736 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6737 provider->dtpv_pops.dtps_resume = 6738 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6739 } 6740 6741 provider->dtpv_arg = arg; 6742 *idp = (dtrace_provider_id_t)provider; 6743 6744 if (pops == &dtrace_provider_ops) { 6745 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6746 ASSERT(MUTEX_HELD(&dtrace_lock)); 6747 ASSERT(dtrace_anon.dta_enabling == NULL); 6748 6749 /* 6750 * We make sure that the DTrace provider is at the head of 6751 * the provider chain. 6752 */ 6753 provider->dtpv_next = dtrace_provider; 6754 dtrace_provider = provider; 6755 return (0); 6756 } 6757 6758 mutex_enter(&dtrace_provider_lock); 6759 mutex_enter(&dtrace_lock); 6760 6761 /* 6762 * If there is at least one provider registered, we'll add this 6763 * provider after the first provider. 
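 * (Once the dtrace provider itself has registered, it is deliberately kept
 * at the head of the chain -- see above -- so any provider registered here
 * is inserted after it.)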
6764 */ 6765 if (dtrace_provider != NULL) { 6766 provider->dtpv_next = dtrace_provider->dtpv_next; 6767 dtrace_provider->dtpv_next = provider; 6768 } else { 6769 dtrace_provider = provider; 6770 } 6771 6772 if (dtrace_retained != NULL) { 6773 dtrace_enabling_provide(provider); 6774 6775 /* 6776 * Now we need to call dtrace_enabling_matchall() -- which 6777 * will acquire cpu_lock and dtrace_lock. We therefore need 6778 * to drop all of our locks before calling into it... 6779 */ 6780 mutex_exit(&dtrace_lock); 6781 mutex_exit(&dtrace_provider_lock); 6782 dtrace_enabling_matchall(); 6783 6784 return (0); 6785 } 6786 6787 mutex_exit(&dtrace_lock); 6788 mutex_exit(&dtrace_provider_lock); 6789 6790 return (0); 6791 } 6792 6793 /* 6794 * Unregister the specified provider from the DTrace framework. This should 6795 * generally be called by DTrace providers in their detach(9E) entry point. 6796 */ 6797 int 6798 dtrace_unregister(dtrace_provider_id_t id) 6799 { 6800 dtrace_provider_t *old = (dtrace_provider_t *)id; 6801 dtrace_provider_t *prev = NULL; 6802 int i, self = 0; 6803 dtrace_probe_t *probe, *first = NULL; 6804 6805 if (old->dtpv_pops.dtps_enable == 6806 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 6807 /* 6808 * If DTrace itself is the provider, we're called with locks 6809 * already held. 6810 */ 6811 ASSERT(old == dtrace_provider); 6812 ASSERT(dtrace_devi != NULL); 6813 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6814 ASSERT(MUTEX_HELD(&dtrace_lock)); 6815 self = 1; 6816 6817 if (dtrace_provider->dtpv_next != NULL) { 6818 /* 6819 * There's another provider here; return failure. 6820 */ 6821 return (EBUSY); 6822 } 6823 } else { 6824 mutex_enter(&dtrace_provider_lock); 6825 mutex_enter(&mod_lock); 6826 mutex_enter(&dtrace_lock); 6827 } 6828 6829 /* 6830 * If anyone has /dev/dtrace open, or if there are anonymous enabled 6831 * probes, we refuse to let providers slither away, unless this 6832 * provider has already been explicitly invalidated. 6833 */ 6834 if (!old->dtpv_defunct && 6835 (dtrace_opens || (dtrace_anon.dta_state != NULL && 6836 dtrace_anon.dta_state->dts_necbs > 0))) { 6837 if (!self) { 6838 mutex_exit(&dtrace_lock); 6839 mutex_exit(&mod_lock); 6840 mutex_exit(&dtrace_provider_lock); 6841 } 6842 return (EBUSY); 6843 } 6844 6845 /* 6846 * Attempt to destroy the probes associated with this provider. 6847 */ 6848 for (i = 0; i < dtrace_nprobes; i++) { 6849 if ((probe = dtrace_probes[i]) == NULL) 6850 continue; 6851 6852 if (probe->dtpr_provider != old) 6853 continue; 6854 6855 if (probe->dtpr_ecb == NULL) 6856 continue; 6857 6858 /* 6859 * We have at least one ECB; we can't remove this provider. 6860 */ 6861 if (!self) { 6862 mutex_exit(&dtrace_lock); 6863 mutex_exit(&mod_lock); 6864 mutex_exit(&dtrace_provider_lock); 6865 } 6866 return (EBUSY); 6867 } 6868 6869 /* 6870 * All of the probes for this provider are disabled; we can safely 6871 * remove all of them from their hash chains and from the probe array. 
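 * We chain the removed probes together through dtpr_nextmod so that they
 * can be destroyed and freed after the dtrace_sync() below, once no CPU
 * can still be referencing them.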
6872 */ 6873 for (i = 0; i < dtrace_nprobes; i++) { 6874 if ((probe = dtrace_probes[i]) == NULL) 6875 continue; 6876 6877 if (probe->dtpr_provider != old) 6878 continue; 6879 6880 dtrace_probes[i] = NULL; 6881 6882 dtrace_hash_remove(dtrace_bymod, probe); 6883 dtrace_hash_remove(dtrace_byfunc, probe); 6884 dtrace_hash_remove(dtrace_byname, probe); 6885 6886 if (first == NULL) { 6887 first = probe; 6888 probe->dtpr_nextmod = NULL; 6889 } else { 6890 probe->dtpr_nextmod = first; 6891 first = probe; 6892 } 6893 } 6894 6895 /* 6896 * The provider's probes have been removed from the hash chains and 6897 * from the probe array. Now issue a dtrace_sync() to be sure that 6898 * everyone has cleared out from any probe array processing. 6899 */ 6900 dtrace_sync(); 6901 6902 for (probe = first; probe != NULL; probe = first) { 6903 first = probe->dtpr_nextmod; 6904 6905 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6906 probe->dtpr_arg); 6907 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6908 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6909 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6910 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6911 kmem_free(probe, sizeof (dtrace_probe_t)); 6912 } 6913 6914 if ((prev = dtrace_provider) == old) { 6915 ASSERT(self || dtrace_devi == NULL); 6916 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6917 dtrace_provider = old->dtpv_next; 6918 } else { 6919 while (prev != NULL && prev->dtpv_next != old) 6920 prev = prev->dtpv_next; 6921 6922 if (prev == NULL) { 6923 panic("attempt to unregister non-existent " 6924 "dtrace provider %p\n", (void *)id); 6925 } 6926 6927 prev->dtpv_next = old->dtpv_next; 6928 } 6929 6930 if (!self) { 6931 mutex_exit(&dtrace_lock); 6932 mutex_exit(&mod_lock); 6933 mutex_exit(&dtrace_provider_lock); 6934 } 6935 6936 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6937 kmem_free(old, sizeof (dtrace_provider_t)); 6938 6939 return (0); 6940 } 6941 6942 /* 6943 * Invalidate the specified provider. All subsequent probe lookups for the 6944 * specified provider will fail, but its probes will not be removed. 6945 */ 6946 void 6947 dtrace_invalidate(dtrace_provider_id_t id) 6948 { 6949 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6950 6951 ASSERT(pvp->dtpv_pops.dtps_enable != 6952 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6953 6954 mutex_enter(&dtrace_provider_lock); 6955 mutex_enter(&dtrace_lock); 6956 6957 pvp->dtpv_defunct = 1; 6958 6959 mutex_exit(&dtrace_lock); 6960 mutex_exit(&dtrace_provider_lock); 6961 } 6962 6963 /* 6964 * Indicate whether or not DTrace has attached. 6965 */ 6966 int 6967 dtrace_attached(void) 6968 { 6969 /* 6970 * dtrace_provider will be non-NULL iff the DTrace driver has 6971 * attached. (It's non-NULL because DTrace is always itself a 6972 * provider.) 6973 */ 6974 return (dtrace_provider != NULL); 6975 } 6976 6977 /* 6978 * Remove all the unenabled probes for the given provider. This function is 6979 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6980 * -- just as many of its associated probes as it can. 6981 */ 6982 int 6983 dtrace_condense(dtrace_provider_id_t id) 6984 { 6985 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6986 int i; 6987 dtrace_probe_t *probe; 6988 6989 /* 6990 * Make sure this isn't the dtrace provider itself. 
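 * (The dtrace provider's dtps_enable op is dtrace_nullop -- the same test
 * used in dtrace_unregister(), above -- so the ASSERT below suffices to
 * rule it out.)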
6991 */ 6992 ASSERT(prov->dtpv_pops.dtps_enable != 6993 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6994 6995 mutex_enter(&dtrace_provider_lock); 6996 mutex_enter(&dtrace_lock); 6997 6998 /* 6999 * Attempt to destroy the probes associated with this provider. 7000 */ 7001 for (i = 0; i < dtrace_nprobes; i++) { 7002 if ((probe = dtrace_probes[i]) == NULL) 7003 continue; 7004 7005 if (probe->dtpr_provider != prov) 7006 continue; 7007 7008 if (probe->dtpr_ecb != NULL) 7009 continue; 7010 7011 dtrace_probes[i] = NULL; 7012 7013 dtrace_hash_remove(dtrace_bymod, probe); 7014 dtrace_hash_remove(dtrace_byfunc, probe); 7015 dtrace_hash_remove(dtrace_byname, probe); 7016 7017 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7018 probe->dtpr_arg); 7019 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7020 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7021 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7022 kmem_free(probe, sizeof (dtrace_probe_t)); 7023 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7024 } 7025 7026 mutex_exit(&dtrace_lock); 7027 mutex_exit(&dtrace_provider_lock); 7028 7029 return (0); 7030 } 7031 7032 /* 7033 * DTrace Probe Management Functions 7034 * 7035 * The functions in this section perform the DTrace probe management, 7036 * including functions to create probes, look-up probes, and call into the 7037 * providers to request that probes be provided. Some of these functions are 7038 * in the Provider-to-Framework API; these functions can be identified by the 7039 * fact that they are not declared "static". 7040 */ 7041 7042 /* 7043 * Create a probe with the specified module name, function name, and name. 7044 */ 7045 dtrace_id_t 7046 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7047 const char *func, const char *name, int aframes, void *arg) 7048 { 7049 dtrace_probe_t *probe, **probes; 7050 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7051 dtrace_id_t id; 7052 7053 if (provider == dtrace_provider) { 7054 ASSERT(MUTEX_HELD(&dtrace_lock)); 7055 } else { 7056 mutex_enter(&dtrace_lock); 7057 } 7058 7059 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7060 VM_BESTFIT | VM_SLEEP); 7061 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7062 7063 probe->dtpr_id = id; 7064 probe->dtpr_gen = dtrace_probegen++; 7065 probe->dtpr_mod = dtrace_strdup(mod); 7066 probe->dtpr_func = dtrace_strdup(func); 7067 probe->dtpr_name = dtrace_strdup(name); 7068 probe->dtpr_arg = arg; 7069 probe->dtpr_aframes = aframes; 7070 probe->dtpr_provider = provider; 7071 7072 dtrace_hash_add(dtrace_bymod, probe); 7073 dtrace_hash_add(dtrace_byfunc, probe); 7074 dtrace_hash_add(dtrace_byname, probe); 7075 7076 if (id - 1 >= dtrace_nprobes) { 7077 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7078 size_t nsize = osize << 1; 7079 7080 if (nsize == 0) { 7081 ASSERT(osize == 0); 7082 ASSERT(dtrace_probes == NULL); 7083 nsize = sizeof (dtrace_probe_t *); 7084 } 7085 7086 probes = kmem_zalloc(nsize, KM_SLEEP); 7087 7088 if (dtrace_probes == NULL) { 7089 ASSERT(osize == 0); 7090 dtrace_probes = probes; 7091 dtrace_nprobes = 1; 7092 } else { 7093 dtrace_probe_t **oprobes = dtrace_probes; 7094 7095 bcopy(oprobes, probes, osize); 7096 dtrace_membar_producer(); 7097 dtrace_probes = probes; 7098 7099 dtrace_sync(); 7100 7101 /* 7102 * All CPUs are now seeing the new probes array; we can 7103 * safely free the old array. 
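 * (The dtrace_membar_producer() above guarantees that the copied contents
 * are visible before the new array pointer is published; the dtrace_sync()
 * then guarantees that no CPU is still walking the old array.)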
7104 */ 7105 kmem_free(oprobes, osize); 7106 dtrace_nprobes <<= 1; 7107 } 7108 7109 ASSERT(id - 1 < dtrace_nprobes); 7110 } 7111 7112 ASSERT(dtrace_probes[id - 1] == NULL); 7113 dtrace_probes[id - 1] = probe; 7114 7115 if (provider != dtrace_provider) 7116 mutex_exit(&dtrace_lock); 7117 7118 return (id); 7119 } 7120 7121 static dtrace_probe_t * 7122 dtrace_probe_lookup_id(dtrace_id_t id) 7123 { 7124 ASSERT(MUTEX_HELD(&dtrace_lock)); 7125 7126 if (id == 0 || id > dtrace_nprobes) 7127 return (NULL); 7128 7129 return (dtrace_probes[id - 1]); 7130 } 7131 7132 static int 7133 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7134 { 7135 *((dtrace_id_t *)arg) = probe->dtpr_id; 7136 7137 return (DTRACE_MATCH_DONE); 7138 } 7139 7140 /* 7141 * Look up a probe based on provider and one or more of module name, function 7142 * name and probe name. 7143 */ 7144 dtrace_id_t 7145 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 7146 const char *func, const char *name) 7147 { 7148 dtrace_probekey_t pkey; 7149 dtrace_id_t id; 7150 int match; 7151 7152 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7153 pkey.dtpk_pmatch = &dtrace_match_string; 7154 pkey.dtpk_mod = mod; 7155 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7156 pkey.dtpk_func = func; 7157 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7158 pkey.dtpk_name = name; 7159 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7160 pkey.dtpk_id = DTRACE_IDNONE; 7161 7162 mutex_enter(&dtrace_lock); 7163 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7164 dtrace_probe_lookup_match, &id); 7165 mutex_exit(&dtrace_lock); 7166 7167 ASSERT(match == 1 || match == 0); 7168 return (match ? id : 0); 7169 } 7170 7171 /* 7172 * Returns the probe argument associated with the specified probe. 7173 */ 7174 void * 7175 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7176 { 7177 dtrace_probe_t *probe; 7178 void *rval = NULL; 7179 7180 mutex_enter(&dtrace_lock); 7181 7182 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7183 probe->dtpr_provider == (dtrace_provider_t *)id) 7184 rval = probe->dtpr_arg; 7185 7186 mutex_exit(&dtrace_lock); 7187 7188 return (rval); 7189 } 7190 7191 /* 7192 * Copy a probe into a probe description. 7193 */ 7194 static void 7195 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7196 { 7197 bzero(pdp, sizeof (dtrace_probedesc_t)); 7198 pdp->dtpd_id = prp->dtpr_id; 7199 7200 (void) strncpy(pdp->dtpd_provider, 7201 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7202 7203 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7204 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7205 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7206 } 7207 7208 /* 7209 * Called to indicate that a probe -- or probes -- should be provided by a 7210 * specfied provider. If the specified description is NULL, the provider will 7211 * be told to provide all of its probes. (This is done whenever a new 7212 * consumer comes along, or whenever a retained enabling is to be matched.) If 7213 * the specified description is non-NULL, the provider is given the 7214 * opportunity to dynamically provide the specified probe, allowing providers 7215 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7216 * probes.) 
If the provider is NULL, the operations will be applied to all 7217 * providers; if the provider is non-NULL the operations will only be applied 7218 * to the specified provider. The dtrace_provider_lock must be held, and the 7219 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7220 * will need to grab the dtrace_lock when it reenters the framework through 7221 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7222 */ 7223 static void 7224 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7225 { 7226 struct modctl *ctl; 7227 int all = 0; 7228 7229 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7230 7231 if (prv == NULL) { 7232 all = 1; 7233 prv = dtrace_provider; 7234 } 7235 7236 do { 7237 /* 7238 * First, call the blanket provide operation. 7239 */ 7240 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7241 7242 /* 7243 * Now call the per-module provide operation. We will grab 7244 * mod_lock to prevent the list from being modified. Note 7245 * that this also prevents the mod_busy bits from changing. 7246 * (mod_busy can only be changed with mod_lock held.) 7247 */ 7248 mutex_enter(&mod_lock); 7249 7250 ctl = &modules; 7251 do { 7252 if (ctl->mod_busy || ctl->mod_mp == NULL) 7253 continue; 7254 7255 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7256 7257 } while ((ctl = ctl->mod_next) != &modules); 7258 7259 mutex_exit(&mod_lock); 7260 } while (all && (prv = prv->dtpv_next) != NULL); 7261 } 7262 7263 /* 7264 * Iterate over each probe, and call the Framework-to-Provider API function 7265 * denoted by offs. 7266 */ 7267 static void 7268 dtrace_probe_foreach(uintptr_t offs) 7269 { 7270 dtrace_provider_t *prov; 7271 void (*func)(void *, dtrace_id_t, void *); 7272 dtrace_probe_t *probe; 7273 dtrace_icookie_t cookie; 7274 int i; 7275 7276 /* 7277 * We disable interrupts to walk through the probe array. This is 7278 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7279 * won't see stale data. 7280 */ 7281 cookie = dtrace_interrupt_disable(); 7282 7283 for (i = 0; i < dtrace_nprobes; i++) { 7284 if ((probe = dtrace_probes[i]) == NULL) 7285 continue; 7286 7287 if (probe->dtpr_ecb == NULL) { 7288 /* 7289 * This probe isn't enabled -- don't call the function. 7290 */ 7291 continue; 7292 } 7293 7294 prov = probe->dtpr_provider; 7295 func = *((void(**)(void *, dtrace_id_t, void *)) 7296 ((uintptr_t)&prov->dtpv_pops + offs)); 7297 7298 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7299 } 7300 7301 dtrace_interrupt_enable(cookie); 7302 } 7303 7304 static int 7305 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7306 { 7307 dtrace_probekey_t pkey; 7308 uint32_t priv; 7309 uid_t uid; 7310 zoneid_t zoneid; 7311 7312 ASSERT(MUTEX_HELD(&dtrace_lock)); 7313 dtrace_ecb_create_cache = NULL; 7314 7315 if (desc == NULL) { 7316 /* 7317 * If we're passed a NULL description, we're being asked to 7318 * create an ECB with a NULL probe. 
7319 */ 7320 (void) dtrace_ecb_create_enable(NULL, enab); 7321 return (0); 7322 } 7323 7324 dtrace_probekey(desc, &pkey); 7325 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7326 &priv, &uid, &zoneid); 7327 7328 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7329 enab)); 7330 } 7331 7332 /* 7333 * DTrace Helper Provider Functions 7334 */ 7335 static void 7336 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7337 { 7338 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7339 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7340 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7341 } 7342 7343 static void 7344 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7345 const dof_provider_t *dofprov, char *strtab) 7346 { 7347 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7348 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7349 dofprov->dofpv_provattr); 7350 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7351 dofprov->dofpv_modattr); 7352 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7353 dofprov->dofpv_funcattr); 7354 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7355 dofprov->dofpv_nameattr); 7356 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7357 dofprov->dofpv_argsattr); 7358 } 7359 7360 static void 7361 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7362 { 7363 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7364 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7365 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7366 dof_provider_t *provider; 7367 dof_probe_t *probe; 7368 uint32_t *off, *enoff; 7369 uint8_t *arg; 7370 char *strtab; 7371 uint_t i, nprobes; 7372 dtrace_helper_provdesc_t dhpv; 7373 dtrace_helper_probedesc_t dhpb; 7374 dtrace_meta_t *meta = dtrace_meta_pid; 7375 dtrace_mops_t *mops = &meta->dtm_mops; 7376 void *parg; 7377 7378 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7379 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7380 provider->dofpv_strtab * dof->dofh_secsize); 7381 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7382 provider->dofpv_probes * dof->dofh_secsize); 7383 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7384 provider->dofpv_prargs * dof->dofh_secsize); 7385 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7386 provider->dofpv_proffs * dof->dofh_secsize); 7387 7388 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7389 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7390 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7391 enoff = NULL; 7392 7393 /* 7394 * See dtrace_helper_provider_validate(). 7395 */ 7396 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7397 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7398 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7399 provider->dofpv_prenoffs * dof->dofh_secsize); 7400 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 7401 } 7402 7403 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 7404 7405 /* 7406 * Create the provider. 7407 */ 7408 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7409 7410 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 7411 return; 7412 7413 meta->dtm_count++; 7414 7415 /* 7416 * Create the probes. 
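 * For each entry in the DOF probe section, we fill in a
 * dtrace_helper_probedesc_t and hand it to the meta-provider's
 * dtms_create_probe() entry point.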
7417 */ 7418 for (i = 0; i < nprobes; i++) { 7419 probe = (dof_probe_t *)(uintptr_t)(daddr + 7420 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 7421 7422 dhpb.dthpb_mod = dhp->dofhp_mod; 7423 dhpb.dthpb_func = strtab + probe->dofpr_func; 7424 dhpb.dthpb_name = strtab + probe->dofpr_name; 7425 dhpb.dthpb_base = probe->dofpr_addr; 7426 dhpb.dthpb_offs = off + probe->dofpr_offidx; 7427 dhpb.dthpb_noffs = probe->dofpr_noffs; 7428 if (enoff != NULL) { 7429 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 7430 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 7431 } else { 7432 dhpb.dthpb_enoffs = NULL; 7433 dhpb.dthpb_nenoffs = 0; 7434 } 7435 dhpb.dthpb_args = arg + probe->dofpr_argidx; 7436 dhpb.dthpb_nargc = probe->dofpr_nargc; 7437 dhpb.dthpb_xargc = probe->dofpr_xargc; 7438 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 7439 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 7440 7441 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 7442 } 7443 } 7444 7445 static void 7446 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 7447 { 7448 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7449 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7450 int i; 7451 7452 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7453 7454 for (i = 0; i < dof->dofh_secnum; i++) { 7455 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7456 dof->dofh_secoff + i * dof->dofh_secsize); 7457 7458 if (sec->dofs_type != DOF_SECT_PROVIDER) 7459 continue; 7460 7461 dtrace_helper_provide_one(dhp, sec, pid); 7462 } 7463 7464 /* 7465 * We may have just created probes, so we must now rematch against 7466 * any retained enablings. Note that this call will acquire both 7467 * cpu_lock and dtrace_lock; the fact that we are holding 7468 * dtrace_meta_lock now is what defines the ordering with respect to 7469 * these three locks. 7470 */ 7471 dtrace_enabling_matchall(); 7472 } 7473 7474 static void 7475 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7476 { 7477 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7478 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7479 dof_sec_t *str_sec; 7480 dof_provider_t *provider; 7481 char *strtab; 7482 dtrace_helper_provdesc_t dhpv; 7483 dtrace_meta_t *meta = dtrace_meta_pid; 7484 dtrace_mops_t *mops = &meta->dtm_mops; 7485 7486 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7487 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7488 provider->dofpv_strtab * dof->dofh_secsize); 7489 7490 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7491 7492 /* 7493 * Create the provider. 7494 */ 7495 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7496 7497 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 7498 7499 meta->dtm_count--; 7500 } 7501 7502 static void 7503 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 7504 { 7505 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7506 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7507 int i; 7508 7509 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7510 7511 for (i = 0; i < dof->dofh_secnum; i++) { 7512 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7513 dof->dofh_secoff + i * dof->dofh_secsize); 7514 7515 if (sec->dofs_type != DOF_SECT_PROVIDER) 7516 continue; 7517 7518 dtrace_helper_provider_remove_one(dhp, sec, pid); 7519 } 7520 } 7521 7522 /* 7523 * DTrace Meta Provider-to-Framework API Functions 7524 * 7525 * These functions implement the Meta Provider-to-Framework API, as described 7526 * in <sys/dtrace.h>. 
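 *
 * A meta-provider registers with a dtrace_mops_t rather than a
 * dtrace_pops_t.  The following is purely an illustrative sketch -- the
 * names "mymeta", "mymeta_mops" and "mymeta_id" are hypothetical and do
 * not correspond to any shipped meta-provider:
 *
 *	static dtrace_mops_t mymeta_mops;
 *	static dtrace_meta_provider_id_t mymeta_id;
 *	int err;
 *
 *	err = dtrace_meta_register("mymeta", &mymeta_mops, NULL, &mymeta_id);
 *
 * Here mymeta_mops must have its dtms_create_probe, dtms_provide_pid and
 * dtms_remove_pid members pointing at the meta-provider's implementations
 * (all three are required; see the checks in dtrace_meta_register()).
 * Only one user-land meta-provider may be registered at a time, and
 * dtrace_meta_unregister() fails with EBUSY while the meta-provider still
 * has providers outstanding (dtm_count != 0).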
7527 */ 7528 int 7529 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 7530 dtrace_meta_provider_id_t *idp) 7531 { 7532 dtrace_meta_t *meta; 7533 dtrace_helpers_t *help, *next; 7534 int i; 7535 7536 *idp = DTRACE_METAPROVNONE; 7537 7538 /* 7539 * We strictly don't need the name, but we hold onto it for 7540 * debuggability. All hail error queues! 7541 */ 7542 if (name == NULL) { 7543 cmn_err(CE_WARN, "failed to register meta-provider: " 7544 "invalid name"); 7545 return (EINVAL); 7546 } 7547 7548 if (mops == NULL || 7549 mops->dtms_create_probe == NULL || 7550 mops->dtms_provide_pid == NULL || 7551 mops->dtms_remove_pid == NULL) { 7552 cmn_err(CE_WARN, "failed to register meta-register %s: " 7553 "invalid ops", name); 7554 return (EINVAL); 7555 } 7556 7557 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 7558 meta->dtm_mops = *mops; 7559 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7560 (void) strcpy(meta->dtm_name, name); 7561 meta->dtm_arg = arg; 7562 7563 mutex_enter(&dtrace_meta_lock); 7564 mutex_enter(&dtrace_lock); 7565 7566 if (dtrace_meta_pid != NULL) { 7567 mutex_exit(&dtrace_lock); 7568 mutex_exit(&dtrace_meta_lock); 7569 cmn_err(CE_WARN, "failed to register meta-register %s: " 7570 "user-land meta-provider exists", name); 7571 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 7572 kmem_free(meta, sizeof (dtrace_meta_t)); 7573 return (EINVAL); 7574 } 7575 7576 dtrace_meta_pid = meta; 7577 *idp = (dtrace_meta_provider_id_t)meta; 7578 7579 /* 7580 * If there are providers and probes ready to go, pass them 7581 * off to the new meta provider now. 7582 */ 7583 7584 help = dtrace_deferred_pid; 7585 dtrace_deferred_pid = NULL; 7586 7587 mutex_exit(&dtrace_lock); 7588 7589 while (help != NULL) { 7590 for (i = 0; i < help->dthps_nprovs; i++) { 7591 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 7592 help->dthps_pid); 7593 } 7594 7595 next = help->dthps_next; 7596 help->dthps_next = NULL; 7597 help->dthps_prev = NULL; 7598 help->dthps_deferred = 0; 7599 help = next; 7600 } 7601 7602 mutex_exit(&dtrace_meta_lock); 7603 7604 return (0); 7605 } 7606 7607 int 7608 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 7609 { 7610 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 7611 7612 mutex_enter(&dtrace_meta_lock); 7613 mutex_enter(&dtrace_lock); 7614 7615 if (old == dtrace_meta_pid) { 7616 pp = &dtrace_meta_pid; 7617 } else { 7618 panic("attempt to unregister non-existent " 7619 "dtrace meta-provider %p\n", (void *)old); 7620 } 7621 7622 if (old->dtm_count != 0) { 7623 mutex_exit(&dtrace_lock); 7624 mutex_exit(&dtrace_meta_lock); 7625 return (EBUSY); 7626 } 7627 7628 *pp = NULL; 7629 7630 mutex_exit(&dtrace_lock); 7631 mutex_exit(&dtrace_meta_lock); 7632 7633 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 7634 kmem_free(old, sizeof (dtrace_meta_t)); 7635 7636 return (0); 7637 } 7638 7639 7640 /* 7641 * DTrace DIF Object Functions 7642 */ 7643 static int 7644 dtrace_difo_err(uint_t pc, const char *format, ...) 7645 { 7646 if (dtrace_err_verbose) { 7647 va_list alist; 7648 7649 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 7650 va_start(alist, format); 7651 (void) vuprintf(format, alist); 7652 va_end(alist); 7653 } 7654 7655 #ifdef DTRACE_ERRDEBUG 7656 dtrace_errdebug(format); 7657 #endif 7658 return (1); 7659 } 7660 7661 /* 7662 * Validate a DTrace DIF object by checking the IR instructions. The following 7663 * rules are currently enforced by dtrace_difo_validate(): 7664 * 7665 * 1. 
Each instruction must have a valid opcode 7666 * 2. Each register, string, variable, or subroutine reference must be valid 7667 * 3. No instruction can modify register %r0 (must be zero) 7668 * 4. All instruction reserved bits must be set to zero 7669 * 5. The last instruction must be a "ret" instruction 7670 * 6. All branch targets must reference a valid instruction _after_ the branch 7671 */ 7672 static int 7673 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7674 cred_t *cr) 7675 { 7676 int err = 0, i; 7677 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 7678 int kcheckload; 7679 uint_t pc; 7680 7681 kcheckload = cr == NULL || 7682 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 7683 7684 dp->dtdo_destructive = 0; 7685 7686 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 7687 dif_instr_t instr = dp->dtdo_buf[pc]; 7688 7689 uint_t r1 = DIF_INSTR_R1(instr); 7690 uint_t r2 = DIF_INSTR_R2(instr); 7691 uint_t rd = DIF_INSTR_RD(instr); 7692 uint_t rs = DIF_INSTR_RS(instr); 7693 uint_t label = DIF_INSTR_LABEL(instr); 7694 uint_t v = DIF_INSTR_VAR(instr); 7695 uint_t subr = DIF_INSTR_SUBR(instr); 7696 uint_t type = DIF_INSTR_TYPE(instr); 7697 uint_t op = DIF_INSTR_OP(instr); 7698 7699 switch (op) { 7700 case DIF_OP_OR: 7701 case DIF_OP_XOR: 7702 case DIF_OP_AND: 7703 case DIF_OP_SLL: 7704 case DIF_OP_SRL: 7705 case DIF_OP_SRA: 7706 case DIF_OP_SUB: 7707 case DIF_OP_ADD: 7708 case DIF_OP_MUL: 7709 case DIF_OP_SDIV: 7710 case DIF_OP_UDIV: 7711 case DIF_OP_SREM: 7712 case DIF_OP_UREM: 7713 case DIF_OP_COPYS: 7714 if (r1 >= nregs) 7715 err += efunc(pc, "invalid register %u\n", r1); 7716 if (r2 >= nregs) 7717 err += efunc(pc, "invalid register %u\n", r2); 7718 if (rd >= nregs) 7719 err += efunc(pc, "invalid register %u\n", rd); 7720 if (rd == 0) 7721 err += efunc(pc, "cannot write to %r0\n"); 7722 break; 7723 case DIF_OP_NOT: 7724 case DIF_OP_MOV: 7725 case DIF_OP_ALLOCS: 7726 if (r1 >= nregs) 7727 err += efunc(pc, "invalid register %u\n", r1); 7728 if (r2 != 0) 7729 err += efunc(pc, "non-zero reserved bits\n"); 7730 if (rd >= nregs) 7731 err += efunc(pc, "invalid register %u\n", rd); 7732 if (rd == 0) 7733 err += efunc(pc, "cannot write to %r0\n"); 7734 break; 7735 case DIF_OP_LDSB: 7736 case DIF_OP_LDSH: 7737 case DIF_OP_LDSW: 7738 case DIF_OP_LDUB: 7739 case DIF_OP_LDUH: 7740 case DIF_OP_LDUW: 7741 case DIF_OP_LDX: 7742 if (r1 >= nregs) 7743 err += efunc(pc, "invalid register %u\n", r1); 7744 if (r2 != 0) 7745 err += efunc(pc, "non-zero reserved bits\n"); 7746 if (rd >= nregs) 7747 err += efunc(pc, "invalid register %u\n", rd); 7748 if (rd == 0) 7749 err += efunc(pc, "cannot write to %r0\n"); 7750 if (kcheckload) 7751 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 7752 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 7753 break; 7754 case DIF_OP_RLDSB: 7755 case DIF_OP_RLDSH: 7756 case DIF_OP_RLDSW: 7757 case DIF_OP_RLDUB: 7758 case DIF_OP_RLDUH: 7759 case DIF_OP_RLDUW: 7760 case DIF_OP_RLDX: 7761 if (r1 >= nregs) 7762 err += efunc(pc, "invalid register %u\n", r1); 7763 if (r2 != 0) 7764 err += efunc(pc, "non-zero reserved bits\n"); 7765 if (rd >= nregs) 7766 err += efunc(pc, "invalid register %u\n", rd); 7767 if (rd == 0) 7768 err += efunc(pc, "cannot write to %r0\n"); 7769 break; 7770 case DIF_OP_ULDSB: 7771 case DIF_OP_ULDSH: 7772 case DIF_OP_ULDSW: 7773 case DIF_OP_ULDUB: 7774 case DIF_OP_ULDUH: 7775 case DIF_OP_ULDUW: 7776 case DIF_OP_ULDX: 7777 if (r1 >= nregs) 7778 err += efunc(pc, "invalid register %u\n", r1); 7779 if (r2 != 0) 7780 err += 
efunc(pc, "non-zero reserved bits\n"); 7781 if (rd >= nregs) 7782 err += efunc(pc, "invalid register %u\n", rd); 7783 if (rd == 0) 7784 err += efunc(pc, "cannot write to %r0\n"); 7785 break; 7786 case DIF_OP_STB: 7787 case DIF_OP_STH: 7788 case DIF_OP_STW: 7789 case DIF_OP_STX: 7790 if (r1 >= nregs) 7791 err += efunc(pc, "invalid register %u\n", r1); 7792 if (r2 != 0) 7793 err += efunc(pc, "non-zero reserved bits\n"); 7794 if (rd >= nregs) 7795 err += efunc(pc, "invalid register %u\n", rd); 7796 if (rd == 0) 7797 err += efunc(pc, "cannot write to 0 address\n"); 7798 break; 7799 case DIF_OP_CMP: 7800 case DIF_OP_SCMP: 7801 if (r1 >= nregs) 7802 err += efunc(pc, "invalid register %u\n", r1); 7803 if (r2 >= nregs) 7804 err += efunc(pc, "invalid register %u\n", r2); 7805 if (rd != 0) 7806 err += efunc(pc, "non-zero reserved bits\n"); 7807 break; 7808 case DIF_OP_TST: 7809 if (r1 >= nregs) 7810 err += efunc(pc, "invalid register %u\n", r1); 7811 if (r2 != 0 || rd != 0) 7812 err += efunc(pc, "non-zero reserved bits\n"); 7813 break; 7814 case DIF_OP_BA: 7815 case DIF_OP_BE: 7816 case DIF_OP_BNE: 7817 case DIF_OP_BG: 7818 case DIF_OP_BGU: 7819 case DIF_OP_BGE: 7820 case DIF_OP_BGEU: 7821 case DIF_OP_BL: 7822 case DIF_OP_BLU: 7823 case DIF_OP_BLE: 7824 case DIF_OP_BLEU: 7825 if (label >= dp->dtdo_len) { 7826 err += efunc(pc, "invalid branch target %u\n", 7827 label); 7828 } 7829 if (label <= pc) { 7830 err += efunc(pc, "backward branch to %u\n", 7831 label); 7832 } 7833 break; 7834 case DIF_OP_RET: 7835 if (r1 != 0 || r2 != 0) 7836 err += efunc(pc, "non-zero reserved bits\n"); 7837 if (rd >= nregs) 7838 err += efunc(pc, "invalid register %u\n", rd); 7839 break; 7840 case DIF_OP_NOP: 7841 case DIF_OP_POPTS: 7842 case DIF_OP_FLUSHTS: 7843 if (r1 != 0 || r2 != 0 || rd != 0) 7844 err += efunc(pc, "non-zero reserved bits\n"); 7845 break; 7846 case DIF_OP_SETX: 7847 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 7848 err += efunc(pc, "invalid integer ref %u\n", 7849 DIF_INSTR_INTEGER(instr)); 7850 } 7851 if (rd >= nregs) 7852 err += efunc(pc, "invalid register %u\n", rd); 7853 if (rd == 0) 7854 err += efunc(pc, "cannot write to %r0\n"); 7855 break; 7856 case DIF_OP_SETS: 7857 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 7858 err += efunc(pc, "invalid string ref %u\n", 7859 DIF_INSTR_STRING(instr)); 7860 } 7861 if (rd >= nregs) 7862 err += efunc(pc, "invalid register %u\n", rd); 7863 if (rd == 0) 7864 err += efunc(pc, "cannot write to %r0\n"); 7865 break; 7866 case DIF_OP_LDGA: 7867 case DIF_OP_LDTA: 7868 if (r1 > DIF_VAR_ARRAY_MAX) 7869 err += efunc(pc, "invalid array %u\n", r1); 7870 if (r2 >= nregs) 7871 err += efunc(pc, "invalid register %u\n", r2); 7872 if (rd >= nregs) 7873 err += efunc(pc, "invalid register %u\n", rd); 7874 if (rd == 0) 7875 err += efunc(pc, "cannot write to %r0\n"); 7876 break; 7877 case DIF_OP_LDGS: 7878 case DIF_OP_LDTS: 7879 case DIF_OP_LDLS: 7880 case DIF_OP_LDGAA: 7881 case DIF_OP_LDTAA: 7882 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 7883 err += efunc(pc, "invalid variable %u\n", v); 7884 if (rd >= nregs) 7885 err += efunc(pc, "invalid register %u\n", rd); 7886 if (rd == 0) 7887 err += efunc(pc, "cannot write to %r0\n"); 7888 break; 7889 case DIF_OP_STGS: 7890 case DIF_OP_STTS: 7891 case DIF_OP_STLS: 7892 case DIF_OP_STGAA: 7893 case DIF_OP_STTAA: 7894 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 7895 err += efunc(pc, "invalid variable %u\n", v); 7896 if (rs >= nregs) 7897 err += efunc(pc, "invalid register %u\n", rd); 7898 break; 7899 case 
DIF_OP_CALL: 7900 if (subr > DIF_SUBR_MAX) 7901 err += efunc(pc, "invalid subr %u\n", subr); 7902 if (rd >= nregs) 7903 err += efunc(pc, "invalid register %u\n", rd); 7904 if (rd == 0) 7905 err += efunc(pc, "cannot write to %r0\n"); 7906 7907 if (subr == DIF_SUBR_COPYOUT || 7908 subr == DIF_SUBR_COPYOUTSTR) { 7909 dp->dtdo_destructive = 1; 7910 } 7911 break; 7912 case DIF_OP_PUSHTR: 7913 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 7914 err += efunc(pc, "invalid ref type %u\n", type); 7915 if (r2 >= nregs) 7916 err += efunc(pc, "invalid register %u\n", r2); 7917 if (rs >= nregs) 7918 err += efunc(pc, "invalid register %u\n", rs); 7919 break; 7920 case DIF_OP_PUSHTV: 7921 if (type != DIF_TYPE_CTF) 7922 err += efunc(pc, "invalid val type %u\n", type); 7923 if (r2 >= nregs) 7924 err += efunc(pc, "invalid register %u\n", r2); 7925 if (rs >= nregs) 7926 err += efunc(pc, "invalid register %u\n", rs); 7927 break; 7928 default: 7929 err += efunc(pc, "invalid opcode %u\n", 7930 DIF_INSTR_OP(instr)); 7931 } 7932 } 7933 7934 if (dp->dtdo_len != 0 && 7935 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7936 err += efunc(dp->dtdo_len - 1, 7937 "expected 'ret' as last DIF instruction\n"); 7938 } 7939 7940 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7941 /* 7942 * If we're not returning by reference, the size must be either 7943 * 0 or the size of one of the base types. 7944 */ 7945 switch (dp->dtdo_rtype.dtdt_size) { 7946 case 0: 7947 case sizeof (uint8_t): 7948 case sizeof (uint16_t): 7949 case sizeof (uint32_t): 7950 case sizeof (uint64_t): 7951 break; 7952 7953 default: 7954 err += efunc(dp->dtdo_len - 1, "bad return size"); 7955 } 7956 } 7957 7958 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7959 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7960 dtrace_diftype_t *vt, *et; 7961 uint_t id, ndx; 7962 7963 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7964 v->dtdv_scope != DIFV_SCOPE_THREAD && 7965 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7966 err += efunc(i, "unrecognized variable scope %d\n", 7967 v->dtdv_scope); 7968 break; 7969 } 7970 7971 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7972 v->dtdv_kind != DIFV_KIND_SCALAR) { 7973 err += efunc(i, "unrecognized variable type %d\n", 7974 v->dtdv_kind); 7975 break; 7976 } 7977 7978 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7979 err += efunc(i, "%d exceeds variable id limit\n", id); 7980 break; 7981 } 7982 7983 if (id < DIF_VAR_OTHER_UBASE) 7984 continue; 7985 7986 /* 7987 * For user-defined variables, we need to check that this 7988 * definition is identical to any previous definition that we 7989 * encountered. 
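 * (User-defined variables are those with identifiers at or above
 * DIF_VAR_OTHER_UBASE; the variable's scope determines which of the
 * vstate's tables -- dtvs_globals, dtvs_tlocals or dtvs_locals -- we
 * consult for a previous definition.)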
7990 */ 7991 ndx = id - DIF_VAR_OTHER_UBASE; 7992 7993 switch (v->dtdv_scope) { 7994 case DIFV_SCOPE_GLOBAL: 7995 if (ndx < vstate->dtvs_nglobals) { 7996 dtrace_statvar_t *svar; 7997 7998 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 7999 existing = &svar->dtsv_var; 8000 } 8001 8002 break; 8003 8004 case DIFV_SCOPE_THREAD: 8005 if (ndx < vstate->dtvs_ntlocals) 8006 existing = &vstate->dtvs_tlocals[ndx]; 8007 break; 8008 8009 case DIFV_SCOPE_LOCAL: 8010 if (ndx < vstate->dtvs_nlocals) { 8011 dtrace_statvar_t *svar; 8012 8013 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8014 existing = &svar->dtsv_var; 8015 } 8016 8017 break; 8018 } 8019 8020 vt = &v->dtdv_type; 8021 8022 if (vt->dtdt_flags & DIF_TF_BYREF) { 8023 if (vt->dtdt_size == 0) { 8024 err += efunc(i, "zero-sized variable\n"); 8025 break; 8026 } 8027 8028 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8029 vt->dtdt_size > dtrace_global_maxsize) { 8030 err += efunc(i, "oversized by-ref global\n"); 8031 break; 8032 } 8033 } 8034 8035 if (existing == NULL || existing->dtdv_id == 0) 8036 continue; 8037 8038 ASSERT(existing->dtdv_id == v->dtdv_id); 8039 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8040 8041 if (existing->dtdv_kind != v->dtdv_kind) 8042 err += efunc(i, "%d changed variable kind\n", id); 8043 8044 et = &existing->dtdv_type; 8045 8046 if (vt->dtdt_flags != et->dtdt_flags) { 8047 err += efunc(i, "%d changed variable type flags\n", id); 8048 break; 8049 } 8050 8051 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8052 err += efunc(i, "%d changed variable type size\n", id); 8053 break; 8054 } 8055 } 8056 8057 return (err); 8058 } 8059 8060 /* 8061 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8062 * are much more constrained than normal DIFOs. Specifically, they may 8063 * not: 8064 * 8065 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8066 * miscellaneous string routines 8067 * 2. Access DTrace variables other than the args[] array, and the 8068 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8069 * 3. Have thread-local variables. 8070 * 4. Have dynamic variables. 8071 */ 8072 static int 8073 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8074 { 8075 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8076 int err = 0; 8077 uint_t pc; 8078 8079 for (pc = 0; pc < dp->dtdo_len; pc++) { 8080 dif_instr_t instr = dp->dtdo_buf[pc]; 8081 8082 uint_t v = DIF_INSTR_VAR(instr); 8083 uint_t subr = DIF_INSTR_SUBR(instr); 8084 uint_t op = DIF_INSTR_OP(instr); 8085 8086 switch (op) { 8087 case DIF_OP_OR: 8088 case DIF_OP_XOR: 8089 case DIF_OP_AND: 8090 case DIF_OP_SLL: 8091 case DIF_OP_SRL: 8092 case DIF_OP_SRA: 8093 case DIF_OP_SUB: 8094 case DIF_OP_ADD: 8095 case DIF_OP_MUL: 8096 case DIF_OP_SDIV: 8097 case DIF_OP_UDIV: 8098 case DIF_OP_SREM: 8099 case DIF_OP_UREM: 8100 case DIF_OP_COPYS: 8101 case DIF_OP_NOT: 8102 case DIF_OP_MOV: 8103 case DIF_OP_RLDSB: 8104 case DIF_OP_RLDSH: 8105 case DIF_OP_RLDSW: 8106 case DIF_OP_RLDUB: 8107 case DIF_OP_RLDUH: 8108 case DIF_OP_RLDUW: 8109 case DIF_OP_RLDX: 8110 case DIF_OP_ULDSB: 8111 case DIF_OP_ULDSH: 8112 case DIF_OP_ULDSW: 8113 case DIF_OP_ULDUB: 8114 case DIF_OP_ULDUH: 8115 case DIF_OP_ULDUW: 8116 case DIF_OP_ULDX: 8117 case DIF_OP_STB: 8118 case DIF_OP_STH: 8119 case DIF_OP_STW: 8120 case DIF_OP_STX: 8121 case DIF_OP_ALLOCS: 8122 case DIF_OP_CMP: 8123 case DIF_OP_SCMP: 8124 case DIF_OP_TST: 8125 case DIF_OP_BA: 8126 case DIF_OP_BE: 8127 case DIF_OP_BNE: 8128 case DIF_OP_BG: 8129 case DIF_OP_BGU: 8130 case DIF_OP_BGE: 8131 case DIF_OP_BGEU: 8132 case DIF_OP_BL: 8133 case DIF_OP_BLU: 8134 case DIF_OP_BLE: 8135 case DIF_OP_BLEU: 8136 case DIF_OP_RET: 8137 case DIF_OP_NOP: 8138 case DIF_OP_POPTS: 8139 case DIF_OP_FLUSHTS: 8140 case DIF_OP_SETX: 8141 case DIF_OP_SETS: 8142 case DIF_OP_LDGA: 8143 case DIF_OP_LDLS: 8144 case DIF_OP_STGS: 8145 case DIF_OP_STLS: 8146 case DIF_OP_PUSHTR: 8147 case DIF_OP_PUSHTV: 8148 break; 8149 8150 case DIF_OP_LDGS: 8151 if (v >= DIF_VAR_OTHER_UBASE) 8152 break; 8153 8154 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8155 break; 8156 8157 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8158 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8159 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8160 v == DIF_VAR_UID || v == DIF_VAR_GID) 8161 break; 8162 8163 err += efunc(pc, "illegal variable %u\n", v); 8164 break; 8165 8166 case DIF_OP_LDTA: 8167 case DIF_OP_LDTS: 8168 case DIF_OP_LDGAA: 8169 case DIF_OP_LDTAA: 8170 err += efunc(pc, "illegal dynamic variable load\n"); 8171 break; 8172 8173 case DIF_OP_STTS: 8174 case DIF_OP_STGAA: 8175 case DIF_OP_STTAA: 8176 err += efunc(pc, "illegal dynamic variable store\n"); 8177 break; 8178 8179 case DIF_OP_CALL: 8180 if (subr == DIF_SUBR_ALLOCA || 8181 subr == DIF_SUBR_BCOPY || 8182 subr == DIF_SUBR_COPYIN || 8183 subr == DIF_SUBR_COPYINTO || 8184 subr == DIF_SUBR_COPYINSTR || 8185 subr == DIF_SUBR_INDEX || 8186 subr == DIF_SUBR_INET_NTOA || 8187 subr == DIF_SUBR_INET_NTOA6 || 8188 subr == DIF_SUBR_INET_NTOP || 8189 subr == DIF_SUBR_LLTOSTR || 8190 subr == DIF_SUBR_RINDEX || 8191 subr == DIF_SUBR_STRCHR || 8192 subr == DIF_SUBR_STRJOIN || 8193 subr == DIF_SUBR_STRRCHR || 8194 subr == DIF_SUBR_STRSTR || 8195 subr == DIF_SUBR_HTONS || 8196 subr == DIF_SUBR_HTONL || 8197 subr == DIF_SUBR_HTONLL || 8198 subr == DIF_SUBR_NTOHS || 8199 subr == DIF_SUBR_NTOHL || 8200 subr == DIF_SUBR_NTOHLL) 8201 break; 8202 8203 err += efunc(pc, "invalid subr %u\n", subr); 8204 break; 8205 8206 default: 8207 err += efunc(pc, "invalid opcode %u\n", 8208 DIF_INSTR_OP(instr)); 8209 } 8210 } 8211 8212 return (err); 8213 } 8214 8215 /* 8216 * Returns 1 if the expression in the DIF object can be cached on a per-thread 8217 * basis; 0 if not. 
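 *
 * As an illustrative example (not exhaustive): a predicate like
 * /pid == 1234/ consults only DIF_VAR_PID and evaluates identically on
 * every firing within a given thread, so it is a caching candidate; a
 * predicate that consults probe arguments (args[], which involves
 * DIF_OP_LDGA) or that loads from memory cannot be cached, since its
 * value may change from firing to firing.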
8218 */ 8219 static int 8220 dtrace_difo_cacheable(dtrace_difo_t *dp) 8221 { 8222 int i; 8223 8224 if (dp == NULL) 8225 return (0); 8226 8227 for (i = 0; i < dp->dtdo_varlen; i++) { 8228 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8229 8230 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8231 continue; 8232 8233 switch (v->dtdv_id) { 8234 case DIF_VAR_CURTHREAD: 8235 case DIF_VAR_PID: 8236 case DIF_VAR_TID: 8237 case DIF_VAR_EXECNAME: 8238 case DIF_VAR_ZONENAME: 8239 break; 8240 8241 default: 8242 return (0); 8243 } 8244 } 8245 8246 /* 8247 * This DIF object may be cacheable. Now we need to look for any 8248 * array loading instructions, any memory loading instructions, or 8249 * any stores to thread-local variables. 8250 */ 8251 for (i = 0; i < dp->dtdo_len; i++) { 8252 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8253 8254 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8255 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8256 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8257 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8258 return (0); 8259 } 8260 8261 return (1); 8262 } 8263 8264 static void 8265 dtrace_difo_hold(dtrace_difo_t *dp) 8266 { 8267 int i; 8268 8269 ASSERT(MUTEX_HELD(&dtrace_lock)); 8270 8271 dp->dtdo_refcnt++; 8272 ASSERT(dp->dtdo_refcnt != 0); 8273 8274 /* 8275 * We need to check this DIF object for references to the variable 8276 * DIF_VAR_VTIMESTAMP. 8277 */ 8278 for (i = 0; i < dp->dtdo_varlen; i++) { 8279 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8280 8281 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8282 continue; 8283 8284 if (dtrace_vtime_references++ == 0) 8285 dtrace_vtime_enable(); 8286 } 8287 } 8288 8289 /* 8290 * This routine calculates the dynamic variable chunksize for a given DIF 8291 * object. The calculation is not fool-proof, and can probably be tricked by 8292 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8293 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8294 * if a dynamic variable size exceeds the chunksize. 
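 *
 * As a rough sketch of the arithmetic performed in the loop below, a
 * dynamic variable with nkeys tuple keys and a stored value of vsize
 * bytes contributes
 *
 *	size = sizeof (dtrace_dynvar_t)
 *	    + sizeof (dtrace_key_t) * (nkeys - 1)
 *	    + ksize + vsize;
 *	size = P2ROUNDUP(size, sizeof (uint64_t));
 *
 * where ksize is the sum of the key sizes, each rounded up to a uint64_t
 * boundary; if this exceeds the current dtds_chunksize, the chunksize is
 * raised to match.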
8295 */ 8296 static void 8297 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8298 { 8299 uint64_t sval; 8300 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8301 const dif_instr_t *text = dp->dtdo_buf; 8302 uint_t pc, srd = 0; 8303 uint_t ttop = 0; 8304 size_t size, ksize; 8305 uint_t id, i; 8306 8307 for (pc = 0; pc < dp->dtdo_len; pc++) { 8308 dif_instr_t instr = text[pc]; 8309 uint_t op = DIF_INSTR_OP(instr); 8310 uint_t rd = DIF_INSTR_RD(instr); 8311 uint_t r1 = DIF_INSTR_R1(instr); 8312 uint_t nkeys = 0; 8313 uchar_t scope; 8314 8315 dtrace_key_t *key = tupregs; 8316 8317 switch (op) { 8318 case DIF_OP_SETX: 8319 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8320 srd = rd; 8321 continue; 8322 8323 case DIF_OP_STTS: 8324 key = &tupregs[DIF_DTR_NREGS]; 8325 key[0].dttk_size = 0; 8326 key[1].dttk_size = 0; 8327 nkeys = 2; 8328 scope = DIFV_SCOPE_THREAD; 8329 break; 8330 8331 case DIF_OP_STGAA: 8332 case DIF_OP_STTAA: 8333 nkeys = ttop; 8334 8335 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8336 key[nkeys++].dttk_size = 0; 8337 8338 key[nkeys++].dttk_size = 0; 8339 8340 if (op == DIF_OP_STTAA) { 8341 scope = DIFV_SCOPE_THREAD; 8342 } else { 8343 scope = DIFV_SCOPE_GLOBAL; 8344 } 8345 8346 break; 8347 8348 case DIF_OP_PUSHTR: 8349 if (ttop == DIF_DTR_NREGS) 8350 return; 8351 8352 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8353 /* 8354 * If the register for the size of the "pushtr" 8355 * is %r0 (or the value is 0) and the type is 8356 * a string, we'll use the system-wide default 8357 * string size. 8358 */ 8359 tupregs[ttop++].dttk_size = 8360 dtrace_strsize_default; 8361 } else { 8362 if (srd == 0) 8363 return; 8364 8365 tupregs[ttop++].dttk_size = sval; 8366 } 8367 8368 break; 8369 8370 case DIF_OP_PUSHTV: 8371 if (ttop == DIF_DTR_NREGS) 8372 return; 8373 8374 tupregs[ttop++].dttk_size = 0; 8375 break; 8376 8377 case DIF_OP_FLUSHTS: 8378 ttop = 0; 8379 break; 8380 8381 case DIF_OP_POPTS: 8382 if (ttop != 0) 8383 ttop--; 8384 break; 8385 } 8386 8387 sval = 0; 8388 srd = 0; 8389 8390 if (nkeys == 0) 8391 continue; 8392 8393 /* 8394 * We have a dynamic variable allocation; calculate its size. 8395 */ 8396 for (ksize = 0, i = 0; i < nkeys; i++) 8397 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 8398 8399 size = sizeof (dtrace_dynvar_t); 8400 size += sizeof (dtrace_key_t) * (nkeys - 1); 8401 size += ksize; 8402 8403 /* 8404 * Now we need to determine the size of the stored data. 8405 */ 8406 id = DIF_INSTR_VAR(instr); 8407 8408 for (i = 0; i < dp->dtdo_varlen; i++) { 8409 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8410 8411 if (v->dtdv_id == id && v->dtdv_scope == scope) { 8412 size += v->dtdv_type.dtdt_size; 8413 break; 8414 } 8415 } 8416 8417 if (i == dp->dtdo_varlen) 8418 return; 8419 8420 /* 8421 * We have the size. If this is larger than the chunk size 8422 * for our dynamic variable state, reset the chunk size. 
8423 */ 8424 size = P2ROUNDUP(size, sizeof (uint64_t)); 8425 8426 if (size > vstate->dtvs_dynvars.dtds_chunksize) 8427 vstate->dtvs_dynvars.dtds_chunksize = size; 8428 } 8429 } 8430 8431 static void 8432 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8433 { 8434 int i, oldsvars, osz, nsz, otlocals, ntlocals; 8435 uint_t id; 8436 8437 ASSERT(MUTEX_HELD(&dtrace_lock)); 8438 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 8439 8440 for (i = 0; i < dp->dtdo_varlen; i++) { 8441 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8442 dtrace_statvar_t *svar, ***svarp; 8443 size_t dsize = 0; 8444 uint8_t scope = v->dtdv_scope; 8445 int *np; 8446 8447 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8448 continue; 8449 8450 id -= DIF_VAR_OTHER_UBASE; 8451 8452 switch (scope) { 8453 case DIFV_SCOPE_THREAD: 8454 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 8455 dtrace_difv_t *tlocals; 8456 8457 if ((ntlocals = (otlocals << 1)) == 0) 8458 ntlocals = 1; 8459 8460 osz = otlocals * sizeof (dtrace_difv_t); 8461 nsz = ntlocals * sizeof (dtrace_difv_t); 8462 8463 tlocals = kmem_zalloc(nsz, KM_SLEEP); 8464 8465 if (osz != 0) { 8466 bcopy(vstate->dtvs_tlocals, 8467 tlocals, osz); 8468 kmem_free(vstate->dtvs_tlocals, osz); 8469 } 8470 8471 vstate->dtvs_tlocals = tlocals; 8472 vstate->dtvs_ntlocals = ntlocals; 8473 } 8474 8475 vstate->dtvs_tlocals[id] = *v; 8476 continue; 8477 8478 case DIFV_SCOPE_LOCAL: 8479 np = &vstate->dtvs_nlocals; 8480 svarp = &vstate->dtvs_locals; 8481 8482 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8483 dsize = NCPU * (v->dtdv_type.dtdt_size + 8484 sizeof (uint64_t)); 8485 else 8486 dsize = NCPU * sizeof (uint64_t); 8487 8488 break; 8489 8490 case DIFV_SCOPE_GLOBAL: 8491 np = &vstate->dtvs_nglobals; 8492 svarp = &vstate->dtvs_globals; 8493 8494 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8495 dsize = v->dtdv_type.dtdt_size + 8496 sizeof (uint64_t); 8497 8498 break; 8499 8500 default: 8501 ASSERT(0); 8502 } 8503 8504 while (id >= (oldsvars = *np)) { 8505 dtrace_statvar_t **statics; 8506 int newsvars, oldsize, newsize; 8507 8508 if ((newsvars = (oldsvars << 1)) == 0) 8509 newsvars = 1; 8510 8511 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 8512 newsize = newsvars * sizeof (dtrace_statvar_t *); 8513 8514 statics = kmem_zalloc(newsize, KM_SLEEP); 8515 8516 if (oldsize != 0) { 8517 bcopy(*svarp, statics, oldsize); 8518 kmem_free(*svarp, oldsize); 8519 } 8520 8521 *svarp = statics; 8522 *np = newsvars; 8523 } 8524 8525 if ((svar = (*svarp)[id]) == NULL) { 8526 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 8527 svar->dtsv_var = *v; 8528 8529 if ((svar->dtsv_size = dsize) != 0) { 8530 svar->dtsv_data = (uint64_t)(uintptr_t) 8531 kmem_zalloc(dsize, KM_SLEEP); 8532 } 8533 8534 (*svarp)[id] = svar; 8535 } 8536 8537 svar->dtsv_refcnt++; 8538 } 8539 8540 dtrace_difo_chunksize(dp, vstate); 8541 dtrace_difo_hold(dp); 8542 } 8543 8544 static dtrace_difo_t * 8545 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8546 { 8547 dtrace_difo_t *new; 8548 size_t sz; 8549 8550 ASSERT(dp->dtdo_buf != NULL); 8551 ASSERT(dp->dtdo_refcnt != 0); 8552 8553 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 8554 8555 ASSERT(dp->dtdo_buf != NULL); 8556 sz = dp->dtdo_len * sizeof (dif_instr_t); 8557 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 8558 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 8559 new->dtdo_len = dp->dtdo_len; 8560 8561 if (dp->dtdo_strtab != NULL) { 8562 ASSERT(dp->dtdo_strlen != 0); 8563 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 8564 
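		/* Copy the string table so the duplicate owns its own storage. */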
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 8565 new->dtdo_strlen = dp->dtdo_strlen; 8566 } 8567 8568 if (dp->dtdo_inttab != NULL) { 8569 ASSERT(dp->dtdo_intlen != 0); 8570 sz = dp->dtdo_intlen * sizeof (uint64_t); 8571 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8572 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8573 new->dtdo_intlen = dp->dtdo_intlen; 8574 } 8575 8576 if (dp->dtdo_vartab != NULL) { 8577 ASSERT(dp->dtdo_varlen != 0); 8578 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8579 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8580 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8581 new->dtdo_varlen = dp->dtdo_varlen; 8582 } 8583 8584 dtrace_difo_init(new, vstate); 8585 return (new); 8586 } 8587 8588 static void 8589 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8590 { 8591 int i; 8592 8593 ASSERT(dp->dtdo_refcnt == 0); 8594 8595 for (i = 0; i < dp->dtdo_varlen; i++) { 8596 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8597 dtrace_statvar_t *svar, **svarp; 8598 uint_t id; 8599 uint8_t scope = v->dtdv_scope; 8600 int *np; 8601 8602 switch (scope) { 8603 case DIFV_SCOPE_THREAD: 8604 continue; 8605 8606 case DIFV_SCOPE_LOCAL: 8607 np = &vstate->dtvs_nlocals; 8608 svarp = vstate->dtvs_locals; 8609 break; 8610 8611 case DIFV_SCOPE_GLOBAL: 8612 np = &vstate->dtvs_nglobals; 8613 svarp = vstate->dtvs_globals; 8614 break; 8615 8616 default: 8617 ASSERT(0); 8618 } 8619 8620 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8621 continue; 8622 8623 id -= DIF_VAR_OTHER_UBASE; 8624 ASSERT(id < *np); 8625 8626 svar = svarp[id]; 8627 ASSERT(svar != NULL); 8628 ASSERT(svar->dtsv_refcnt > 0); 8629 8630 if (--svar->dtsv_refcnt > 0) 8631 continue; 8632 8633 if (svar->dtsv_size != 0) { 8634 ASSERT(svar->dtsv_data != NULL); 8635 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8636 svar->dtsv_size); 8637 } 8638 8639 kmem_free(svar, sizeof (dtrace_statvar_t)); 8640 svarp[id] = NULL; 8641 } 8642 8643 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8644 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8645 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8646 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8647 8648 kmem_free(dp, sizeof (dtrace_difo_t)); 8649 } 8650 8651 static void 8652 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8653 { 8654 int i; 8655 8656 ASSERT(MUTEX_HELD(&dtrace_lock)); 8657 ASSERT(dp->dtdo_refcnt != 0); 8658 8659 for (i = 0; i < dp->dtdo_varlen; i++) { 8660 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8661 8662 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8663 continue; 8664 8665 ASSERT(dtrace_vtime_references > 0); 8666 if (--dtrace_vtime_references == 0) 8667 dtrace_vtime_disable(); 8668 } 8669 8670 if (--dp->dtdo_refcnt == 0) 8671 dtrace_difo_destroy(dp, vstate); 8672 } 8673 8674 /* 8675 * DTrace Format Functions 8676 */ 8677 static uint16_t 8678 dtrace_format_add(dtrace_state_t *state, char *str) 8679 { 8680 char *fmt, **new; 8681 uint16_t ndx, len = strlen(str) + 1; 8682 8683 fmt = kmem_zalloc(len, KM_SLEEP); 8684 bcopy(str, fmt, len); 8685 8686 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 8687 if (state->dts_formats[ndx] == NULL) { 8688 state->dts_formats[ndx] = fmt; 8689 return (ndx + 1); 8690 } 8691 } 8692 8693 if (state->dts_nformats == USHRT_MAX) { 8694 /* 8695 * This is only likely if a denial-of-service attack is being 8696 * attempted. As such, it's okay to fail silently here. 
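 * (Returning 0 is safe: format handles given to callers are 1-based, so
 * 0 is reserved to mean "no format".)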
8697 */ 8698 kmem_free(fmt, len); 8699 return (0); 8700 } 8701 8702 /* 8703 * For simplicity, we always resize the formats array to be exactly the 8704 * number of formats. 8705 */ 8706 ndx = state->dts_nformats++; 8707 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 8708 8709 if (state->dts_formats != NULL) { 8710 ASSERT(ndx != 0); 8711 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 8712 kmem_free(state->dts_formats, ndx * sizeof (char *)); 8713 } 8714 8715 state->dts_formats = new; 8716 state->dts_formats[ndx] = fmt; 8717 8718 return (ndx + 1); 8719 } 8720 8721 static void 8722 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 8723 { 8724 char *fmt; 8725 8726 ASSERT(state->dts_formats != NULL); 8727 ASSERT(format <= state->dts_nformats); 8728 ASSERT(state->dts_formats[format - 1] != NULL); 8729 8730 fmt = state->dts_formats[format - 1]; 8731 kmem_free(fmt, strlen(fmt) + 1); 8732 state->dts_formats[format - 1] = NULL; 8733 } 8734 8735 static void 8736 dtrace_format_destroy(dtrace_state_t *state) 8737 { 8738 int i; 8739 8740 if (state->dts_nformats == 0) { 8741 ASSERT(state->dts_formats == NULL); 8742 return; 8743 } 8744 8745 ASSERT(state->dts_formats != NULL); 8746 8747 for (i = 0; i < state->dts_nformats; i++) { 8748 char *fmt = state->dts_formats[i]; 8749 8750 if (fmt == NULL) 8751 continue; 8752 8753 kmem_free(fmt, strlen(fmt) + 1); 8754 } 8755 8756 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 8757 state->dts_nformats = 0; 8758 state->dts_formats = NULL; 8759 } 8760 8761 /* 8762 * DTrace Predicate Functions 8763 */ 8764 static dtrace_predicate_t * 8765 dtrace_predicate_create(dtrace_difo_t *dp) 8766 { 8767 dtrace_predicate_t *pred; 8768 8769 ASSERT(MUTEX_HELD(&dtrace_lock)); 8770 ASSERT(dp->dtdo_refcnt != 0); 8771 8772 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 8773 pred->dtp_difo = dp; 8774 pred->dtp_refcnt = 1; 8775 8776 if (!dtrace_difo_cacheable(dp)) 8777 return (pred); 8778 8779 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 8780 /* 8781 * This is only theoretically possible -- we have had 2^32 8782 * cacheable predicates on this machine. We cannot allow any 8783 * more predicates to become cacheable: as unlikely as it is, 8784 * there may be a thread caching a (now stale) predicate cache 8785 * ID. 
(N.B.: the temptation is being successfully resisted to 8786 * have this cmn_err() "Holy shit -- we executed this code!") 8787 */ 8788 return (pred); 8789 } 8790 8791 pred->dtp_cacheid = dtrace_predcache_id++; 8792 8793 return (pred); 8794 } 8795 8796 static void 8797 dtrace_predicate_hold(dtrace_predicate_t *pred) 8798 { 8799 ASSERT(MUTEX_HELD(&dtrace_lock)); 8800 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 8801 ASSERT(pred->dtp_refcnt > 0); 8802 8803 pred->dtp_refcnt++; 8804 } 8805 8806 static void 8807 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 8808 { 8809 dtrace_difo_t *dp = pred->dtp_difo; 8810 8811 ASSERT(MUTEX_HELD(&dtrace_lock)); 8812 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 8813 ASSERT(pred->dtp_refcnt > 0); 8814 8815 if (--pred->dtp_refcnt == 0) { 8816 dtrace_difo_release(pred->dtp_difo, vstate); 8817 kmem_free(pred, sizeof (dtrace_predicate_t)); 8818 } 8819 } 8820 8821 /* 8822 * DTrace Action Description Functions 8823 */ 8824 static dtrace_actdesc_t * 8825 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 8826 uint64_t uarg, uint64_t arg) 8827 { 8828 dtrace_actdesc_t *act; 8829 8830 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 8831 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 8832 8833 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 8834 act->dtad_kind = kind; 8835 act->dtad_ntuple = ntuple; 8836 act->dtad_uarg = uarg; 8837 act->dtad_arg = arg; 8838 act->dtad_refcnt = 1; 8839 8840 return (act); 8841 } 8842 8843 static void 8844 dtrace_actdesc_hold(dtrace_actdesc_t *act) 8845 { 8846 ASSERT(act->dtad_refcnt >= 1); 8847 act->dtad_refcnt++; 8848 } 8849 8850 static void 8851 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 8852 { 8853 dtrace_actkind_t kind = act->dtad_kind; 8854 dtrace_difo_t *dp; 8855 8856 ASSERT(act->dtad_refcnt >= 1); 8857 8858 if (--act->dtad_refcnt != 0) 8859 return; 8860 8861 if ((dp = act->dtad_difo) != NULL) 8862 dtrace_difo_release(dp, vstate); 8863 8864 if (DTRACEACT_ISPRINTFLIKE(kind)) { 8865 char *str = (char *)(uintptr_t)act->dtad_arg; 8866 8867 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 8868 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 8869 8870 if (str != NULL) 8871 kmem_free(str, strlen(str) + 1); 8872 } 8873 8874 kmem_free(act, sizeof (dtrace_actdesc_t)); 8875 } 8876 8877 /* 8878 * DTrace ECB Functions 8879 */ 8880 static dtrace_ecb_t * 8881 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 8882 { 8883 dtrace_ecb_t *ecb; 8884 dtrace_epid_t epid; 8885 8886 ASSERT(MUTEX_HELD(&dtrace_lock)); 8887 8888 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 8889 ecb->dte_predicate = NULL; 8890 ecb->dte_probe = probe; 8891 8892 /* 8893 * The default size is the size of the default action: recording 8894 * the epid. 
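 * Every record in the principal buffer thus begins with a dtrace_epid_t;
 * consumers use it to look up the record layout of the enabled probe.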
8895 */ 8896 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8897 ecb->dte_alignment = sizeof (dtrace_epid_t); 8898 8899 epid = state->dts_epid++; 8900 8901 if (epid - 1 >= state->dts_necbs) { 8902 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 8903 int necbs = state->dts_necbs << 1; 8904 8905 ASSERT(epid == state->dts_necbs + 1); 8906 8907 if (necbs == 0) { 8908 ASSERT(oecbs == NULL); 8909 necbs = 1; 8910 } 8911 8912 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 8913 8914 if (oecbs != NULL) 8915 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 8916 8917 dtrace_membar_producer(); 8918 state->dts_ecbs = ecbs; 8919 8920 if (oecbs != NULL) { 8921 /* 8922 * If this state is active, we must dtrace_sync() 8923 * before we can free the old dts_ecbs array: we're 8924 * coming in hot, and there may be active ring 8925 * buffer processing (which indexes into the dts_ecbs 8926 * array) on another CPU. 8927 */ 8928 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 8929 dtrace_sync(); 8930 8931 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 8932 } 8933 8934 dtrace_membar_producer(); 8935 state->dts_necbs = necbs; 8936 } 8937 8938 ecb->dte_state = state; 8939 8940 ASSERT(state->dts_ecbs[epid - 1] == NULL); 8941 dtrace_membar_producer(); 8942 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8943 8944 return (ecb); 8945 } 8946 8947 static void 8948 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8949 { 8950 dtrace_probe_t *probe = ecb->dte_probe; 8951 8952 ASSERT(MUTEX_HELD(&cpu_lock)); 8953 ASSERT(MUTEX_HELD(&dtrace_lock)); 8954 ASSERT(ecb->dte_next == NULL); 8955 8956 if (probe == NULL) { 8957 /* 8958 * This is the NULL probe -- there's nothing to do. 8959 */ 8960 return; 8961 } 8962 8963 if (probe->dtpr_ecb == NULL) { 8964 dtrace_provider_t *prov = probe->dtpr_provider; 8965 8966 /* 8967 * We're the first ECB on this probe. 8968 */ 8969 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8970 8971 if (ecb->dte_predicate != NULL) 8972 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8973 8974 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8975 probe->dtpr_id, probe->dtpr_arg); 8976 } else { 8977 /* 8978 * This probe is already active. Swing the last pointer to 8979 * point to the new ECB, and issue a dtrace_sync() to assure 8980 * that all CPUs have seen the change. 8981 */ 8982 ASSERT(probe->dtpr_ecb_last != NULL); 8983 probe->dtpr_ecb_last->dte_next = ecb; 8984 probe->dtpr_ecb_last = ecb; 8985 probe->dtpr_predcache = 0; 8986 8987 dtrace_sync(); 8988 } 8989 } 8990 8991 static void 8992 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8993 { 8994 uint32_t maxalign = sizeof (dtrace_epid_t); 8995 uint32_t align = sizeof (uint8_t), offs, diff; 8996 dtrace_action_t *act; 8997 int wastuple = 0; 8998 uint32_t aggbase = UINT32_MAX; 8999 dtrace_state_t *state = ecb->dte_state; 9000 9001 /* 9002 * If we record anything, we always record the epid. (And we always 9003 * record it first.) 9004 */ 9005 offs = sizeof (dtrace_epid_t); 9006 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9007 9008 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9009 dtrace_recdesc_t *rec = &act->dta_rec; 9010 9011 if ((align = rec->dtrd_alignment) > maxalign) 9012 maxalign = align; 9013 9014 if (!wastuple && act->dta_intuple) { 9015 /* 9016 * This is the first record in a tuple. Align the 9017 * offset to be at offset 4 in an 8-byte aligned 9018 * block. 
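 *
 * For example, with offs = 8 and a 4-byte dtrace_aggid_t: diff is 12,
 * its low bits (12 & 7) are 4, so offs advances by 4 to 12 and aggbase
 * becomes 8 -- the aggregation ID is 8-byte aligned and the tuple data
 * begins at offset 4 within that 8-byte block.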
9019 */ 9020 diff = offs + sizeof (dtrace_aggid_t); 9021 9022 if (diff = (diff & (sizeof (uint64_t) - 1))) 9023 offs += sizeof (uint64_t) - diff; 9024 9025 aggbase = offs - sizeof (dtrace_aggid_t); 9026 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9027 } 9028 9029 /*LINTED*/ 9030 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9031 /* 9032 * The current offset is not properly aligned; align it. 9033 */ 9034 offs += align - diff; 9035 } 9036 9037 rec->dtrd_offset = offs; 9038 9039 if (offs + rec->dtrd_size > ecb->dte_needed) { 9040 ecb->dte_needed = offs + rec->dtrd_size; 9041 9042 if (ecb->dte_needed > state->dts_needed) 9043 state->dts_needed = ecb->dte_needed; 9044 } 9045 9046 if (DTRACEACT_ISAGG(act->dta_kind)) { 9047 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9048 dtrace_action_t *first = agg->dtag_first, *prev; 9049 9050 ASSERT(rec->dtrd_size != 0 && first != NULL); 9051 ASSERT(wastuple); 9052 ASSERT(aggbase != UINT32_MAX); 9053 9054 agg->dtag_base = aggbase; 9055 9056 while ((prev = first->dta_prev) != NULL && 9057 DTRACEACT_ISAGG(prev->dta_kind)) { 9058 agg = (dtrace_aggregation_t *)prev; 9059 first = agg->dtag_first; 9060 } 9061 9062 if (prev != NULL) { 9063 offs = prev->dta_rec.dtrd_offset + 9064 prev->dta_rec.dtrd_size; 9065 } else { 9066 offs = sizeof (dtrace_epid_t); 9067 } 9068 wastuple = 0; 9069 } else { 9070 if (!act->dta_intuple) 9071 ecb->dte_size = offs + rec->dtrd_size; 9072 9073 offs += rec->dtrd_size; 9074 } 9075 9076 wastuple = act->dta_intuple; 9077 } 9078 9079 if ((act = ecb->dte_action) != NULL && 9080 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9081 ecb->dte_size == sizeof (dtrace_epid_t)) { 9082 /* 9083 * If the size is still sizeof (dtrace_epid_t), then all 9084 * actions store no data; set the size to 0. 9085 */ 9086 ecb->dte_alignment = maxalign; 9087 ecb->dte_size = 0; 9088 9089 /* 9090 * If the needed space is still sizeof (dtrace_epid_t), then 9091 * all actions need no additional space; set the needed 9092 * size to 0. 9093 */ 9094 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9095 ecb->dte_needed = 0; 9096 9097 return; 9098 } 9099 9100 /* 9101 * Set our alignment, and make sure that the dte_size and dte_needed 9102 * are aligned to the size of an EPID. 
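 * (This is the usual power-of-two rounding idiom: with a 4-byte
 * dtrace_epid_t, a dte_size of 13 becomes (13 + 3) & ~3 = 16.)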
9103 */ 9104 ecb->dte_alignment = maxalign; 9105 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9106 ~(sizeof (dtrace_epid_t) - 1); 9107 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9108 ~(sizeof (dtrace_epid_t) - 1); 9109 ASSERT(ecb->dte_size <= ecb->dte_needed); 9110 } 9111 9112 static dtrace_action_t * 9113 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9114 { 9115 dtrace_aggregation_t *agg; 9116 size_t size = sizeof (uint64_t); 9117 int ntuple = desc->dtad_ntuple; 9118 dtrace_action_t *act; 9119 dtrace_recdesc_t *frec; 9120 dtrace_aggid_t aggid; 9121 dtrace_state_t *state = ecb->dte_state; 9122 9123 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9124 agg->dtag_ecb = ecb; 9125 9126 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9127 9128 switch (desc->dtad_kind) { 9129 case DTRACEAGG_MIN: 9130 agg->dtag_initial = UINT64_MAX; 9131 agg->dtag_aggregate = dtrace_aggregate_min; 9132 break; 9133 9134 case DTRACEAGG_MAX: 9135 agg->dtag_aggregate = dtrace_aggregate_max; 9136 break; 9137 9138 case DTRACEAGG_COUNT: 9139 agg->dtag_aggregate = dtrace_aggregate_count; 9140 break; 9141 9142 case DTRACEAGG_QUANTIZE: 9143 agg->dtag_aggregate = dtrace_aggregate_quantize; 9144 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9145 sizeof (uint64_t); 9146 break; 9147 9148 case DTRACEAGG_LQUANTIZE: { 9149 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9150 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9151 9152 agg->dtag_initial = desc->dtad_arg; 9153 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9154 9155 if (step == 0 || levels == 0) 9156 goto err; 9157 9158 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9159 break; 9160 } 9161 9162 case DTRACEAGG_AVG: 9163 agg->dtag_aggregate = dtrace_aggregate_avg; 9164 size = sizeof (uint64_t) * 2; 9165 break; 9166 9167 case DTRACEAGG_SUM: 9168 agg->dtag_aggregate = dtrace_aggregate_sum; 9169 break; 9170 9171 default: 9172 goto err; 9173 } 9174 9175 agg->dtag_action.dta_rec.dtrd_size = size; 9176 9177 if (ntuple == 0) 9178 goto err; 9179 9180 /* 9181 * We must make sure that we have enough actions for the n-tuple. 9182 */ 9183 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9184 if (DTRACEACT_ISAGG(act->dta_kind)) 9185 break; 9186 9187 if (--ntuple == 0) { 9188 /* 9189 * This is the action with which our n-tuple begins. 9190 */ 9191 agg->dtag_first = act; 9192 goto success; 9193 } 9194 } 9195 9196 /* 9197 * This n-tuple is short by ntuple elements. Return failure. 9198 */ 9199 ASSERT(ntuple != 0); 9200 err: 9201 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9202 return (NULL); 9203 9204 success: 9205 /* 9206 * If the last action in the tuple has a size of zero, it's actually 9207 * an expression argument for the aggregating action. 9208 */ 9209 ASSERT(ecb->dte_action_last != NULL); 9210 act = ecb->dte_action_last; 9211 9212 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9213 ASSERT(act->dta_difo != NULL); 9214 9215 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9216 agg->dtag_hasarg = 1; 9217 } 9218 9219 /* 9220 * We need to allocate an id for this aggregation. 
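 * IDs come from the per-state dts_aggid_arena and are 1-based; the
 * dts_aggregations array below is grown by doubling, just as the ECB
 * array is in dtrace_ecb_add().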
9221 */ 9222 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9223 VM_BESTFIT | VM_SLEEP); 9224 9225 if (aggid - 1 >= state->dts_naggregations) { 9226 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9227 dtrace_aggregation_t **aggs; 9228 int naggs = state->dts_naggregations << 1; 9229 int onaggs = state->dts_naggregations; 9230 9231 ASSERT(aggid == state->dts_naggregations + 1); 9232 9233 if (naggs == 0) { 9234 ASSERT(oaggs == NULL); 9235 naggs = 1; 9236 } 9237 9238 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9239 9240 if (oaggs != NULL) { 9241 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9242 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9243 } 9244 9245 state->dts_aggregations = aggs; 9246 state->dts_naggregations = naggs; 9247 } 9248 9249 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9250 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9251 9252 frec = &agg->dtag_first->dta_rec; 9253 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9254 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9255 9256 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9257 ASSERT(!act->dta_intuple); 9258 act->dta_intuple = 1; 9259 } 9260 9261 return (&agg->dtag_action); 9262 } 9263 9264 static void 9265 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9266 { 9267 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9268 dtrace_state_t *state = ecb->dte_state; 9269 dtrace_aggid_t aggid = agg->dtag_id; 9270 9271 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9272 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9273 9274 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9275 state->dts_aggregations[aggid - 1] = NULL; 9276 9277 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9278 } 9279 9280 static int 9281 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9282 { 9283 dtrace_action_t *action, *last; 9284 dtrace_difo_t *dp = desc->dtad_difo; 9285 uint32_t size = 0, align = sizeof (uint8_t), mask; 9286 uint16_t format = 0; 9287 dtrace_recdesc_t *rec; 9288 dtrace_state_t *state = ecb->dte_state; 9289 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9290 uint64_t arg = desc->dtad_arg; 9291 9292 ASSERT(MUTEX_HELD(&dtrace_lock)); 9293 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9294 9295 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9296 /* 9297 * If this is an aggregating action, there must be neither 9298 * a speculate nor a commit on the action chain. 9299 */ 9300 dtrace_action_t *act; 9301 9302 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9303 if (act->dta_kind == DTRACEACT_COMMIT) 9304 return (EINVAL); 9305 9306 if (act->dta_kind == DTRACEACT_SPECULATE) 9307 return (EINVAL); 9308 } 9309 9310 action = dtrace_ecb_aggregation_create(ecb, desc); 9311 9312 if (action == NULL) 9313 return (EINVAL); 9314 } else { 9315 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9316 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9317 dp != NULL && dp->dtdo_destructive)) { 9318 state->dts_destructive = 1; 9319 } 9320 9321 switch (desc->dtad_kind) { 9322 case DTRACEACT_PRINTF: 9323 case DTRACEACT_PRINTA: 9324 case DTRACEACT_SYSTEM: 9325 case DTRACEACT_FREOPEN: 9326 /* 9327 * We know that our arg is a string -- turn it into a 9328 * format. 
9329 */ 9330 if (arg == NULL) { 9331 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 9332 format = 0; 9333 } else { 9334 ASSERT(arg != NULL); 9335 ASSERT(arg > KERNELBASE); 9336 format = dtrace_format_add(state, 9337 (char *)(uintptr_t)arg); 9338 } 9339 9340 /*FALLTHROUGH*/ 9341 case DTRACEACT_LIBACT: 9342 case DTRACEACT_DIFEXPR: 9343 if (dp == NULL) 9344 return (EINVAL); 9345 9346 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9347 break; 9348 9349 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 9350 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9351 return (EINVAL); 9352 9353 size = opt[DTRACEOPT_STRSIZE]; 9354 } 9355 9356 break; 9357 9358 case DTRACEACT_STACK: 9359 if ((nframes = arg) == 0) { 9360 nframes = opt[DTRACEOPT_STACKFRAMES]; 9361 ASSERT(nframes > 0); 9362 arg = nframes; 9363 } 9364 9365 size = nframes * sizeof (pc_t); 9366 break; 9367 9368 case DTRACEACT_JSTACK: 9369 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 9370 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 9371 9372 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 9373 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 9374 9375 arg = DTRACE_USTACK_ARG(nframes, strsize); 9376 9377 /*FALLTHROUGH*/ 9378 case DTRACEACT_USTACK: 9379 if (desc->dtad_kind != DTRACEACT_JSTACK && 9380 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 9381 strsize = DTRACE_USTACK_STRSIZE(arg); 9382 nframes = opt[DTRACEOPT_USTACKFRAMES]; 9383 ASSERT(nframes > 0); 9384 arg = DTRACE_USTACK_ARG(nframes, strsize); 9385 } 9386 9387 /* 9388 * Save a slot for the pid. 9389 */ 9390 size = (nframes + 1) * sizeof (uint64_t); 9391 size += DTRACE_USTACK_STRSIZE(arg); 9392 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 9393 9394 break; 9395 9396 case DTRACEACT_SYM: 9397 case DTRACEACT_MOD: 9398 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 9399 sizeof (uint64_t)) || 9400 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9401 return (EINVAL); 9402 break; 9403 9404 case DTRACEACT_USYM: 9405 case DTRACEACT_UMOD: 9406 case DTRACEACT_UADDR: 9407 if (dp == NULL || 9408 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 9409 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9410 return (EINVAL); 9411 9412 /* 9413 * We have a slot for the pid, plus a slot for the 9414 * argument. To keep things simple (aligned with 9415 * bitness-neutral sizing), we store each as a 64-bit 9416 * quantity. 
9417 */ 9418 size = 2 * sizeof (uint64_t); 9419 break; 9420 9421 case DTRACEACT_STOP: 9422 case DTRACEACT_BREAKPOINT: 9423 case DTRACEACT_PANIC: 9424 break; 9425 9426 case DTRACEACT_CHILL: 9427 case DTRACEACT_DISCARD: 9428 case DTRACEACT_RAISE: 9429 if (dp == NULL) 9430 return (EINVAL); 9431 break; 9432 9433 case DTRACEACT_EXIT: 9434 if (dp == NULL || 9435 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 9436 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9437 return (EINVAL); 9438 break; 9439 9440 case DTRACEACT_SPECULATE: 9441 if (ecb->dte_size > sizeof (dtrace_epid_t)) 9442 return (EINVAL); 9443 9444 if (dp == NULL) 9445 return (EINVAL); 9446 9447 state->dts_speculates = 1; 9448 break; 9449 9450 case DTRACEACT_COMMIT: { 9451 dtrace_action_t *act = ecb->dte_action; 9452 9453 for (; act != NULL; act = act->dta_next) { 9454 if (act->dta_kind == DTRACEACT_COMMIT) 9455 return (EINVAL); 9456 } 9457 9458 if (dp == NULL) 9459 return (EINVAL); 9460 break; 9461 } 9462 9463 default: 9464 return (EINVAL); 9465 } 9466 9467 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 9468 /* 9469 * If this is a data-storing action or a speculate, 9470 * we must be sure that there isn't a commit on the 9471 * action chain. 9472 */ 9473 dtrace_action_t *act = ecb->dte_action; 9474 9475 for (; act != NULL; act = act->dta_next) { 9476 if (act->dta_kind == DTRACEACT_COMMIT) 9477 return (EINVAL); 9478 } 9479 } 9480 9481 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 9482 action->dta_rec.dtrd_size = size; 9483 } 9484 9485 action->dta_refcnt = 1; 9486 rec = &action->dta_rec; 9487 size = rec->dtrd_size; 9488 9489 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 9490 if (!(size & mask)) { 9491 align = mask + 1; 9492 break; 9493 } 9494 } 9495 9496 action->dta_kind = desc->dtad_kind; 9497 9498 if ((action->dta_difo = dp) != NULL) 9499 dtrace_difo_hold(dp); 9500 9501 rec->dtrd_action = action->dta_kind; 9502 rec->dtrd_arg = arg; 9503 rec->dtrd_uarg = desc->dtad_uarg; 9504 rec->dtrd_alignment = (uint16_t)align; 9505 rec->dtrd_format = format; 9506 9507 if ((last = ecb->dte_action_last) != NULL) { 9508 ASSERT(ecb->dte_action != NULL); 9509 action->dta_prev = last; 9510 last->dta_next = action; 9511 } else { 9512 ASSERT(ecb->dte_action == NULL); 9513 ecb->dte_action = action; 9514 } 9515 9516 ecb->dte_action_last = action; 9517 9518 return (0); 9519 } 9520 9521 static void 9522 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 9523 { 9524 dtrace_action_t *act = ecb->dte_action, *next; 9525 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 9526 dtrace_difo_t *dp; 9527 uint16_t format; 9528 9529 if (act != NULL && act->dta_refcnt > 1) { 9530 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 9531 act->dta_refcnt--; 9532 } else { 9533 for (; act != NULL; act = next) { 9534 next = act->dta_next; 9535 ASSERT(next != NULL || act == ecb->dte_action_last); 9536 ASSERT(act->dta_refcnt == 1); 9537 9538 if ((format = act->dta_rec.dtrd_format) != 0) 9539 dtrace_format_remove(ecb->dte_state, format); 9540 9541 if ((dp = act->dta_difo) != NULL) 9542 dtrace_difo_release(dp, vstate); 9543 9544 if (DTRACEACT_ISAGG(act->dta_kind)) { 9545 dtrace_ecb_aggregation_destroy(ecb, act); 9546 } else { 9547 kmem_free(act, sizeof (dtrace_action_t)); 9548 } 9549 } 9550 } 9551 9552 ecb->dte_action = NULL; 9553 ecb->dte_action_last = NULL; 9554 ecb->dte_size = sizeof (dtrace_epid_t); 9555 } 9556 9557 static void 9558 dtrace_ecb_disable(dtrace_ecb_t *ecb) 9559 { 9560 /* 9561 * We disable the ECB by 
removing it from its probe. 9562 */ 9563 dtrace_ecb_t *pecb, *prev = NULL; 9564 dtrace_probe_t *probe = ecb->dte_probe; 9565 9566 ASSERT(MUTEX_HELD(&dtrace_lock)); 9567 9568 if (probe == NULL) { 9569 /* 9570 * This is the NULL probe; there is nothing to disable. 9571 */ 9572 return; 9573 } 9574 9575 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9576 if (pecb == ecb) 9577 break; 9578 prev = pecb; 9579 } 9580 9581 ASSERT(pecb != NULL); 9582 9583 if (prev == NULL) { 9584 probe->dtpr_ecb = ecb->dte_next; 9585 } else { 9586 prev->dte_next = ecb->dte_next; 9587 } 9588 9589 if (ecb == probe->dtpr_ecb_last) { 9590 ASSERT(ecb->dte_next == NULL); 9591 probe->dtpr_ecb_last = prev; 9592 } 9593 9594 /* 9595 * The ECB has been disconnected from the probe; now sync to assure 9596 * that all CPUs have seen the change before returning. 9597 */ 9598 dtrace_sync(); 9599 9600 if (probe->dtpr_ecb == NULL) { 9601 /* 9602 * That was the last ECB on the probe; clear the predicate 9603 * cache ID for the probe, disable it and sync one more time 9604 * to assure that we'll never hit it again. 9605 */ 9606 dtrace_provider_t *prov = probe->dtpr_provider; 9607 9608 ASSERT(ecb->dte_next == NULL); 9609 ASSERT(probe->dtpr_ecb_last == NULL); 9610 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9611 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9612 probe->dtpr_id, probe->dtpr_arg); 9613 dtrace_sync(); 9614 } else { 9615 /* 9616 * There is at least one ECB remaining on the probe. If there 9617 * is _exactly_ one, set the probe's predicate cache ID to be 9618 * the predicate cache ID of the remaining ECB. 9619 */ 9620 ASSERT(probe->dtpr_ecb_last != NULL); 9621 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9622 9623 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9624 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9625 9626 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9627 9628 if (p != NULL) 9629 probe->dtpr_predcache = p->dtp_cacheid; 9630 } 9631 9632 ecb->dte_next = NULL; 9633 } 9634 } 9635 9636 static void 9637 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9638 { 9639 dtrace_state_t *state = ecb->dte_state; 9640 dtrace_vstate_t *vstate = &state->dts_vstate; 9641 dtrace_predicate_t *pred; 9642 dtrace_epid_t epid = ecb->dte_epid; 9643 9644 ASSERT(MUTEX_HELD(&dtrace_lock)); 9645 ASSERT(ecb->dte_next == NULL); 9646 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 9647 9648 if ((pred = ecb->dte_predicate) != NULL) 9649 dtrace_predicate_release(pred, vstate); 9650 9651 dtrace_ecb_action_remove(ecb); 9652 9653 ASSERT(state->dts_ecbs[epid - 1] == ecb); 9654 state->dts_ecbs[epid - 1] = NULL; 9655 9656 kmem_free(ecb, sizeof (dtrace_ecb_t)); 9657 } 9658 9659 static dtrace_ecb_t * 9660 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 9661 dtrace_enabling_t *enab) 9662 { 9663 dtrace_ecb_t *ecb; 9664 dtrace_predicate_t *pred; 9665 dtrace_actdesc_t *act; 9666 dtrace_provider_t *prov; 9667 dtrace_ecbdesc_t *desc = enab->dten_current; 9668 9669 ASSERT(MUTEX_HELD(&dtrace_lock)); 9670 ASSERT(state != NULL); 9671 9672 ecb = dtrace_ecb_add(state, probe); 9673 ecb->dte_uarg = desc->dted_uarg; 9674 9675 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 9676 dtrace_predicate_hold(pred); 9677 ecb->dte_predicate = pred; 9678 } 9679 9680 if (probe != NULL) { 9681 /* 9682 * If the provider shows more leg than the consumer is old 9683 * enough to see, we need to enable the appropriate implicit 9684 * predicate bits to prevent the ecb from activating at 9685 * revealing times. 
9686 * 9687 * Providers specifying DTRACE_PRIV_USER at register time 9688 * are stating that they need the /proc-style privilege 9689 * model to be enforced, and this is what DTRACE_COND_OWNER 9690 * and DTRACE_COND_ZONEOWNER will then do at probe time. 9691 */ 9692 prov = probe->dtpr_provider; 9693 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 9694 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9695 ecb->dte_cond |= DTRACE_COND_OWNER; 9696 9697 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 9698 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9699 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 9700 9701 /* 9702 * If the provider shows us kernel innards and the user 9703 * is lacking sufficient privilege, enable the 9704 * DTRACE_COND_USERMODE implicit predicate. 9705 */ 9706 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 9707 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 9708 ecb->dte_cond |= DTRACE_COND_USERMODE; 9709 } 9710 9711 if (dtrace_ecb_create_cache != NULL) { 9712 /* 9713 * If we have a cached ecb, we'll use its action list instead 9714 * of creating our own (saving both time and space). 9715 */ 9716 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 9717 dtrace_action_t *act = cached->dte_action; 9718 9719 if (act != NULL) { 9720 ASSERT(act->dta_refcnt > 0); 9721 act->dta_refcnt++; 9722 ecb->dte_action = act; 9723 ecb->dte_action_last = cached->dte_action_last; 9724 ecb->dte_needed = cached->dte_needed; 9725 ecb->dte_size = cached->dte_size; 9726 ecb->dte_alignment = cached->dte_alignment; 9727 } 9728 9729 return (ecb); 9730 } 9731 9732 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 9733 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 9734 dtrace_ecb_destroy(ecb); 9735 return (NULL); 9736 } 9737 } 9738 9739 dtrace_ecb_resize(ecb); 9740 9741 return (dtrace_ecb_create_cache = ecb); 9742 } 9743 9744 static int 9745 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 9746 { 9747 dtrace_ecb_t *ecb; 9748 dtrace_enabling_t *enab = arg; 9749 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 9750 9751 ASSERT(state != NULL); 9752 9753 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 9754 /* 9755 * This probe was created in a generation for which this 9756 * enabling has previously created ECBs; we don't want to 9757 * enable it again, so just kick out. 
9758 */ 9759 return (DTRACE_MATCH_NEXT); 9760 } 9761 9762 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 9763 return (DTRACE_MATCH_DONE); 9764 9765 dtrace_ecb_enable(ecb); 9766 return (DTRACE_MATCH_NEXT); 9767 } 9768 9769 static dtrace_ecb_t * 9770 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 9771 { 9772 dtrace_ecb_t *ecb; 9773 9774 ASSERT(MUTEX_HELD(&dtrace_lock)); 9775 9776 if (id == 0 || id > state->dts_necbs) 9777 return (NULL); 9778 9779 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 9780 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 9781 9782 return (state->dts_ecbs[id - 1]); 9783 } 9784 9785 static dtrace_aggregation_t * 9786 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 9787 { 9788 dtrace_aggregation_t *agg; 9789 9790 ASSERT(MUTEX_HELD(&dtrace_lock)); 9791 9792 if (id == 0 || id > state->dts_naggregations) 9793 return (NULL); 9794 9795 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 9796 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 9797 agg->dtag_id == id); 9798 9799 return (state->dts_aggregations[id - 1]); 9800 } 9801 9802 /* 9803 * DTrace Buffer Functions 9804 * 9805 * The following functions manipulate DTrace buffers. Most of these functions 9806 * are called in the context of establishing or processing consumer state; 9807 * exceptions are explicitly noted. 9808 */ 9809 9810 /* 9811 * Note: called from cross call context. This function switches the two 9812 * buffers on a given CPU. The atomicity of this operation is assured by 9813 * disabling interrupts while the actual switch takes place; the disabling of 9814 * interrupts serializes the execution with any execution of dtrace_probe() on 9815 * the same CPU. 9816 */ 9817 static void 9818 dtrace_buffer_switch(dtrace_buffer_t *buf) 9819 { 9820 caddr_t tomax = buf->dtb_tomax; 9821 caddr_t xamot = buf->dtb_xamot; 9822 dtrace_icookie_t cookie; 9823 9824 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9825 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 9826 9827 cookie = dtrace_interrupt_disable(); 9828 buf->dtb_tomax = xamot; 9829 buf->dtb_xamot = tomax; 9830 buf->dtb_xamot_drops = buf->dtb_drops; 9831 buf->dtb_xamot_offset = buf->dtb_offset; 9832 buf->dtb_xamot_errors = buf->dtb_errors; 9833 buf->dtb_xamot_flags = buf->dtb_flags; 9834 buf->dtb_offset = 0; 9835 buf->dtb_drops = 0; 9836 buf->dtb_errors = 0; 9837 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 9838 dtrace_interrupt_enable(cookie); 9839 } 9840 9841 /* 9842 * Note: called from cross call context. This function activates a buffer 9843 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 9844 * is guaranteed by the disabling of interrupts. 9845 */ 9846 static void 9847 dtrace_buffer_activate(dtrace_state_t *state) 9848 { 9849 dtrace_buffer_t *buf; 9850 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 9851 9852 buf = &state->dts_buffer[CPU->cpu_id]; 9853 9854 if (buf->dtb_tomax != NULL) { 9855 /* 9856 * We might like to assert that the buffer is marked inactive, 9857 * but this isn't necessarily true: the buffer for the CPU 9858 * that processes the BEGIN probe has its buffer activated 9859 * manually. In this case, we take the (harmless) action 9860 * re-clearing the bit INACTIVE bit. 
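 * (That is, DTRACEBUF_INACTIVE may simply be cleared a second time,
 * which is benign.)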
9861 */ 9862 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 9863 } 9864 9865 dtrace_interrupt_enable(cookie); 9866 } 9867 9868 static int 9869 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 9870 processorid_t cpu) 9871 { 9872 cpu_t *cp; 9873 dtrace_buffer_t *buf; 9874 9875 ASSERT(MUTEX_HELD(&cpu_lock)); 9876 ASSERT(MUTEX_HELD(&dtrace_lock)); 9877 9878 if (size > dtrace_nonroot_maxsize && 9879 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 9880 return (EFBIG); 9881 9882 cp = cpu_list; 9883 9884 do { 9885 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9886 continue; 9887 9888 buf = &bufs[cp->cpu_id]; 9889 9890 /* 9891 * If there is already a buffer allocated for this CPU, it 9892 * is only possible that this is a DR event. In this case, 9893 * the buffer size must match our specified size. 9894 */ 9895 if (buf->dtb_tomax != NULL) { 9896 ASSERT(buf->dtb_size == size); 9897 continue; 9898 } 9899 9900 ASSERT(buf->dtb_xamot == NULL); 9901 9902 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9903 goto err; 9904 9905 buf->dtb_size = size; 9906 buf->dtb_flags = flags; 9907 buf->dtb_offset = 0; 9908 buf->dtb_drops = 0; 9909 9910 if (flags & DTRACEBUF_NOSWITCH) 9911 continue; 9912 9913 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9914 goto err; 9915 } while ((cp = cp->cpu_next) != cpu_list); 9916 9917 return (0); 9918 9919 err: 9920 cp = cpu_list; 9921 9922 do { 9923 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9924 continue; 9925 9926 buf = &bufs[cp->cpu_id]; 9927 9928 if (buf->dtb_xamot != NULL) { 9929 ASSERT(buf->dtb_tomax != NULL); 9930 ASSERT(buf->dtb_size == size); 9931 kmem_free(buf->dtb_xamot, size); 9932 } 9933 9934 if (buf->dtb_tomax != NULL) { 9935 ASSERT(buf->dtb_size == size); 9936 kmem_free(buf->dtb_tomax, size); 9937 } 9938 9939 buf->dtb_tomax = NULL; 9940 buf->dtb_xamot = NULL; 9941 buf->dtb_size = 0; 9942 } while ((cp = cp->cpu_next) != cpu_list); 9943 9944 return (ENOMEM); 9945 } 9946 9947 /* 9948 * Note: called from probe context. This function just increments the drop 9949 * count on a buffer. It has been made a function to allow for the 9950 * possibility of understanding the source of mysterious drop counts. (A 9951 * problem for which one may be particularly disappointed that DTrace cannot 9952 * be used to understand DTrace.) 9953 */ 9954 static void 9955 dtrace_buffer_drop(dtrace_buffer_t *buf) 9956 { 9957 buf->dtb_drops++; 9958 } 9959 9960 /* 9961 * Note: called from probe context. This function is called to reserve space 9962 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9963 * mstate. Returns the new offset in the buffer, or a negative value if an 9964 * error has occurred. 9965 */ 9966 static intptr_t 9967 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9968 dtrace_state_t *state, dtrace_mstate_t *mstate) 9969 { 9970 intptr_t offs = buf->dtb_offset, soffs; 9971 intptr_t woffs; 9972 caddr_t tomax; 9973 size_t total; 9974 9975 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9976 return (-1); 9977 9978 if ((tomax = buf->dtb_tomax) == NULL) { 9979 dtrace_buffer_drop(buf); 9980 return (-1); 9981 } 9982 9983 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9984 while (offs & (align - 1)) { 9985 /* 9986 * Assert that our alignment is off by a number which 9987 * is itself sizeof (uint32_t) aligned. 
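 * For instance, if offs is 4 and align is 8, the shortfall is 4: a
 * single DTRACE_EPIDNONE word is stored as padding and offs advances
 * to 8.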
9988 */ 9989 ASSERT(!((align - (offs & (align - 1))) & 9990 (sizeof (uint32_t) - 1))); 9991 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9992 offs += sizeof (uint32_t); 9993 } 9994 9995 if ((soffs = offs + needed) > buf->dtb_size) { 9996 dtrace_buffer_drop(buf); 9997 return (-1); 9998 } 9999 10000 if (mstate == NULL) 10001 return (offs); 10002 10003 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10004 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10005 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10006 10007 return (offs); 10008 } 10009 10010 if (buf->dtb_flags & DTRACEBUF_FILL) { 10011 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10012 (buf->dtb_flags & DTRACEBUF_FULL)) 10013 return (-1); 10014 goto out; 10015 } 10016 10017 total = needed + (offs & (align - 1)); 10018 10019 /* 10020 * For a ring buffer, life is quite a bit more complicated. Before 10021 * we can store any padding, we need to adjust our wrapping offset. 10022 * (If we've never before wrapped or we're not about to, no adjustment 10023 * is required.) 10024 */ 10025 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10026 offs + total > buf->dtb_size) { 10027 woffs = buf->dtb_xamot_offset; 10028 10029 if (offs + total > buf->dtb_size) { 10030 /* 10031 * We can't fit in the end of the buffer. First, a 10032 * sanity check that we can fit in the buffer at all. 10033 */ 10034 if (total > buf->dtb_size) { 10035 dtrace_buffer_drop(buf); 10036 return (-1); 10037 } 10038 10039 /* 10040 * We're going to be storing at the top of the buffer, 10041 * so now we need to deal with the wrapped offset. We 10042 * only reset our wrapped offset to 0 if it is 10043 * currently greater than the current offset. If it 10044 * is less than the current offset, it is because a 10045 * previous allocation induced a wrap -- but the 10046 * allocation didn't subsequently take the space due 10047 * to an error or false predicate evaluation. In this 10048 * case, we'll just leave the wrapped offset alone: if 10049 * the wrapped offset hasn't been advanced far enough 10050 * for this allocation, it will be adjusted in the 10051 * lower loop. 10052 */ 10053 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10054 if (woffs >= offs) 10055 woffs = 0; 10056 } else { 10057 woffs = 0; 10058 } 10059 10060 /* 10061 * Now we know that we're going to be storing to the 10062 * top of the buffer and that there is room for us 10063 * there. We need to clear the buffer from the current 10064 * offset to the end (there may be old gunk there). 10065 */ 10066 while (offs < buf->dtb_size) 10067 tomax[offs++] = 0; 10068 10069 /* 10070 * We need to set our offset to zero. And because we 10071 * are wrapping, we need to set the bit indicating as 10072 * much. We can also adjust our needed space back 10073 * down to the space required by the ECB -- we know 10074 * that the top of the buffer is aligned. 10075 */ 10076 offs = 0; 10077 total = needed; 10078 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10079 } else { 10080 /* 10081 * There is room for us in the buffer, so we simply 10082 * need to check the wrapped offset. 10083 */ 10084 if (woffs < offs) { 10085 /* 10086 * The wrapped offset is less than the offset. 10087 * This can happen if we allocated buffer space 10088 * that induced a wrap, but then we didn't 10089 * subsequently take the space due to an error 10090 * or false predicate evaluation. This is 10091 * okay; we know that _this_ allocation isn't 10092 * going to induce a wrap. 
We still can't 10093 * reset the wrapped offset to be zero, 10094 * however: the space may have been trashed in 10095 * the previous failed probe attempt. But at 10096 * least the wrapped offset doesn't need to 10097 * be adjusted at all... 10098 */ 10099 goto out; 10100 } 10101 } 10102 10103 while (offs + total > woffs) { 10104 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10105 size_t size; 10106 10107 if (epid == DTRACE_EPIDNONE) { 10108 size = sizeof (uint32_t); 10109 } else { 10110 ASSERT(epid <= state->dts_necbs); 10111 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10112 10113 size = state->dts_ecbs[epid - 1]->dte_size; 10114 } 10115 10116 ASSERT(woffs + size <= buf->dtb_size); 10117 ASSERT(size != 0); 10118 10119 if (woffs + size == buf->dtb_size) { 10120 /* 10121 * We've reached the end of the buffer; we want 10122 * to set the wrapped offset to 0 and break 10123 * out. However, if the offs is 0, then we're 10124 * in a strange edge-condition: the amount of 10125 * space that we want to reserve plus the size 10126 * of the record that we're overwriting is 10127 * greater than the size of the buffer. This 10128 * is problematic because if we reserve the 10129 * space but subsequently don't consume it (due 10130 * to a failed predicate or error) the wrapped 10131 * offset will be 0 -- yet the EPID at offset 0 10132 * will not be committed. This situation is 10133 * relatively easy to deal with: if we're in 10134 * this case, the buffer is indistinguishable 10135 * from one that hasn't wrapped; we need only 10136 * finish the job by clearing the wrapped bit, 10137 * explicitly setting the offset to be 0, and 10138 * zero'ing out the old data in the buffer. 10139 */ 10140 if (offs == 0) { 10141 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10142 buf->dtb_offset = 0; 10143 woffs = total; 10144 10145 while (woffs < buf->dtb_size) 10146 tomax[woffs++] = 0; 10147 } 10148 10149 woffs = 0; 10150 break; 10151 } 10152 10153 woffs += size; 10154 } 10155 10156 /* 10157 * We have a wrapped offset. It may be that the wrapped offset 10158 * has become zero -- that's okay. 10159 */ 10160 buf->dtb_xamot_offset = woffs; 10161 } 10162 10163 out: 10164 /* 10165 * Now we can plow the buffer with any necessary padding. 10166 */ 10167 while (offs & (align - 1)) { 10168 /* 10169 * Assert that our alignment is off by a number which 10170 * is itself sizeof (uint32_t) aligned. 10171 */ 10172 ASSERT(!((align - (offs & (align - 1))) & 10173 (sizeof (uint32_t) - 1))); 10174 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10175 offs += sizeof (uint32_t); 10176 } 10177 10178 if (buf->dtb_flags & DTRACEBUF_FILL) { 10179 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10180 buf->dtb_flags |= DTRACEBUF_FULL; 10181 return (-1); 10182 } 10183 } 10184 10185 if (mstate == NULL) 10186 return (offs); 10187 10188 /* 10189 * For ring buffers and fill buffers, the scratch space is always 10190 * the inactive buffer. 10191 */ 10192 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10193 mstate->dtms_scratch_size = buf->dtb_size; 10194 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10195 10196 return (offs); 10197 } 10198 10199 static void 10200 dtrace_buffer_polish(dtrace_buffer_t *buf) 10201 { 10202 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10203 ASSERT(MUTEX_HELD(&dtrace_lock)); 10204 10205 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10206 return; 10207 10208 /* 10209 * We need to polish the ring buffer. 
There are three cases: 10210 * 10211 * - The first (and presumably most common) is that there is no gap 10212 * between the buffer offset and the wrapped offset. In this case, 10213 * there is nothing in the buffer that isn't valid data; we can 10214 * mark the buffer as polished and return. 10215 * 10216 * - The second (less common than the first but still more common 10217 * than the third) is that there is a gap between the buffer offset 10218 * and the wrapped offset, and the wrapped offset is larger than the 10219 * buffer offset. This can happen because of an alignment issue, or 10220 * can happen because of a call to dtrace_buffer_reserve() that 10221 * didn't subsequently consume the buffer space. In this case, 10222 * we need to zero the data from the buffer offset to the wrapped 10223 * offset. 10224 * 10225 * - The third (and least common) is that there is a gap between the 10226 * buffer offset and the wrapped offset, but the wrapped offset is 10227 * _less_ than the buffer offset. This can only happen because a 10228 * call to dtrace_buffer_reserve() induced a wrap, but the space 10229 * was not subsequently consumed. In this case, we need to zero the 10230 * space from the offset to the end of the buffer _and_ from the 10231 * top of the buffer to the wrapped offset. 10232 */ 10233 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10234 bzero(buf->dtb_tomax + buf->dtb_offset, 10235 buf->dtb_xamot_offset - buf->dtb_offset); 10236 } 10237 10238 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10239 bzero(buf->dtb_tomax + buf->dtb_offset, 10240 buf->dtb_size - buf->dtb_offset); 10241 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10242 } 10243 } 10244 10245 static void 10246 dtrace_buffer_free(dtrace_buffer_t *bufs) 10247 { 10248 int i; 10249 10250 for (i = 0; i < NCPU; i++) { 10251 dtrace_buffer_t *buf = &bufs[i]; 10252 10253 if (buf->dtb_tomax == NULL) { 10254 ASSERT(buf->dtb_xamot == NULL); 10255 ASSERT(buf->dtb_size == 0); 10256 continue; 10257 } 10258 10259 if (buf->dtb_xamot != NULL) { 10260 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10261 kmem_free(buf->dtb_xamot, buf->dtb_size); 10262 } 10263 10264 kmem_free(buf->dtb_tomax, buf->dtb_size); 10265 buf->dtb_size = 0; 10266 buf->dtb_tomax = NULL; 10267 buf->dtb_xamot = NULL; 10268 } 10269 } 10270 10271 /* 10272 * DTrace Enabling Functions 10273 */ 10274 static dtrace_enabling_t * 10275 dtrace_enabling_create(dtrace_vstate_t *vstate) 10276 { 10277 dtrace_enabling_t *enab; 10278 10279 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10280 enab->dten_vstate = vstate; 10281 10282 return (enab); 10283 } 10284 10285 static void 10286 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10287 { 10288 dtrace_ecbdesc_t **ndesc; 10289 size_t osize, nsize; 10290 10291 /* 10292 * We can't add to enablings after we've enabled them, or after we've 10293 * retained them. 
10294 */ 10295 ASSERT(enab->dten_probegen == 0); 10296 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10297 10298 if (enab->dten_ndesc < enab->dten_maxdesc) { 10299 enab->dten_desc[enab->dten_ndesc++] = ecb; 10300 return; 10301 } 10302 10303 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10304 10305 if (enab->dten_maxdesc == 0) { 10306 enab->dten_maxdesc = 1; 10307 } else { 10308 enab->dten_maxdesc <<= 1; 10309 } 10310 10311 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 10312 10313 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10314 ndesc = kmem_zalloc(nsize, KM_SLEEP); 10315 bcopy(enab->dten_desc, ndesc, osize); 10316 kmem_free(enab->dten_desc, osize); 10317 10318 enab->dten_desc = ndesc; 10319 enab->dten_desc[enab->dten_ndesc++] = ecb; 10320 } 10321 10322 static void 10323 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 10324 dtrace_probedesc_t *pd) 10325 { 10326 dtrace_ecbdesc_t *new; 10327 dtrace_predicate_t *pred; 10328 dtrace_actdesc_t *act; 10329 10330 /* 10331 * We're going to create a new ECB description that matches the 10332 * specified ECB in every way, but has the specified probe description. 10333 */ 10334 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10335 10336 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 10337 dtrace_predicate_hold(pred); 10338 10339 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 10340 dtrace_actdesc_hold(act); 10341 10342 new->dted_action = ecb->dted_action; 10343 new->dted_pred = ecb->dted_pred; 10344 new->dted_probe = *pd; 10345 new->dted_uarg = ecb->dted_uarg; 10346 10347 dtrace_enabling_add(enab, new); 10348 } 10349 10350 static void 10351 dtrace_enabling_dump(dtrace_enabling_t *enab) 10352 { 10353 int i; 10354 10355 for (i = 0; i < enab->dten_ndesc; i++) { 10356 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 10357 10358 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 10359 desc->dtpd_provider, desc->dtpd_mod, 10360 desc->dtpd_func, desc->dtpd_name); 10361 } 10362 } 10363 10364 static void 10365 dtrace_enabling_destroy(dtrace_enabling_t *enab) 10366 { 10367 int i; 10368 dtrace_ecbdesc_t *ep; 10369 dtrace_vstate_t *vstate = enab->dten_vstate; 10370 10371 ASSERT(MUTEX_HELD(&dtrace_lock)); 10372 10373 for (i = 0; i < enab->dten_ndesc; i++) { 10374 dtrace_actdesc_t *act, *next; 10375 dtrace_predicate_t *pred; 10376 10377 ep = enab->dten_desc[i]; 10378 10379 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 10380 dtrace_predicate_release(pred, vstate); 10381 10382 for (act = ep->dted_action; act != NULL; act = next) { 10383 next = act->dtad_next; 10384 dtrace_actdesc_release(act, vstate); 10385 } 10386 10387 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10388 } 10389 10390 kmem_free(enab->dten_desc, 10391 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 10392 10393 /* 10394 * If this was a retained enabling, decrement the dts_nretained count 10395 * and take it off of the dtrace_retained list. 
10396 */ 10397 if (enab->dten_prev != NULL || enab->dten_next != NULL || 10398 dtrace_retained == enab) { 10399 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10400 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 10401 enab->dten_vstate->dtvs_state->dts_nretained--; 10402 } 10403 10404 if (enab->dten_prev == NULL) { 10405 if (dtrace_retained == enab) { 10406 dtrace_retained = enab->dten_next; 10407 10408 if (dtrace_retained != NULL) 10409 dtrace_retained->dten_prev = NULL; 10410 } 10411 } else { 10412 ASSERT(enab != dtrace_retained); 10413 ASSERT(dtrace_retained != NULL); 10414 enab->dten_prev->dten_next = enab->dten_next; 10415 } 10416 10417 if (enab->dten_next != NULL) { 10418 ASSERT(dtrace_retained != NULL); 10419 enab->dten_next->dten_prev = enab->dten_prev; 10420 } 10421 10422 kmem_free(enab, sizeof (dtrace_enabling_t)); 10423 } 10424 10425 static int 10426 dtrace_enabling_retain(dtrace_enabling_t *enab) 10427 { 10428 dtrace_state_t *state; 10429 10430 ASSERT(MUTEX_HELD(&dtrace_lock)); 10431 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10432 ASSERT(enab->dten_vstate != NULL); 10433 10434 state = enab->dten_vstate->dtvs_state; 10435 ASSERT(state != NULL); 10436 10437 /* 10438 * We only allow each state to retain dtrace_retain_max enablings. 10439 */ 10440 if (state->dts_nretained >= dtrace_retain_max) 10441 return (ENOSPC); 10442 10443 state->dts_nretained++; 10444 10445 if (dtrace_retained == NULL) { 10446 dtrace_retained = enab; 10447 return (0); 10448 } 10449 10450 enab->dten_next = dtrace_retained; 10451 dtrace_retained->dten_prev = enab; 10452 dtrace_retained = enab; 10453 10454 return (0); 10455 } 10456 10457 static int 10458 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 10459 dtrace_probedesc_t *create) 10460 { 10461 dtrace_enabling_t *new, *enab; 10462 int found = 0, err = ENOENT; 10463 10464 ASSERT(MUTEX_HELD(&dtrace_lock)); 10465 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 10466 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 10467 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 10468 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 10469 10470 new = dtrace_enabling_create(&state->dts_vstate); 10471 10472 /* 10473 * Iterate over all retained enablings, looking for enablings that 10474 * match the specified state. 10475 */ 10476 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10477 int i; 10478 10479 /* 10480 * dtvs_state can only be NULL for helper enablings -- and 10481 * helper enablings can't be retained. 10482 */ 10483 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10484 10485 if (enab->dten_vstate->dtvs_state != state) 10486 continue; 10487 10488 /* 10489 * Now iterate over each probe description; we're looking for 10490 * an exact match to the specified probe description. 10491 */ 10492 for (i = 0; i < enab->dten_ndesc; i++) { 10493 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10494 dtrace_probedesc_t *pd = &ep->dted_probe; 10495 10496 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 10497 continue; 10498 10499 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 10500 continue; 10501 10502 if (strcmp(pd->dtpd_func, match->dtpd_func)) 10503 continue; 10504 10505 if (strcmp(pd->dtpd_name, match->dtpd_name)) 10506 continue; 10507 10508 /* 10509 * We have a winning probe! Add it to our growing 10510 * enabling. 
10511 */ 10512 found = 1; 10513 dtrace_enabling_addlike(new, ep, create); 10514 } 10515 } 10516 10517 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 10518 dtrace_enabling_destroy(new); 10519 return (err); 10520 } 10521 10522 return (0); 10523 } 10524 10525 static void 10526 dtrace_enabling_retract(dtrace_state_t *state) 10527 { 10528 dtrace_enabling_t *enab, *next; 10529 10530 ASSERT(MUTEX_HELD(&dtrace_lock)); 10531 10532 /* 10533 * Iterate over all retained enablings, destroy the enablings retained 10534 * for the specified state. 10535 */ 10536 for (enab = dtrace_retained; enab != NULL; enab = next) { 10537 next = enab->dten_next; 10538 10539 /* 10540 * dtvs_state can only be NULL for helper enablings -- and 10541 * helper enablings can't be retained. 10542 */ 10543 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10544 10545 if (enab->dten_vstate->dtvs_state == state) { 10546 ASSERT(state->dts_nretained > 0); 10547 dtrace_enabling_destroy(enab); 10548 } 10549 } 10550 10551 ASSERT(state->dts_nretained == 0); 10552 } 10553 10554 static int 10555 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 10556 { 10557 int i = 0; 10558 int matched = 0; 10559 10560 ASSERT(MUTEX_HELD(&cpu_lock)); 10561 ASSERT(MUTEX_HELD(&dtrace_lock)); 10562 10563 for (i = 0; i < enab->dten_ndesc; i++) { 10564 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10565 10566 enab->dten_current = ep; 10567 enab->dten_error = 0; 10568 10569 matched += dtrace_probe_enable(&ep->dted_probe, enab); 10570 10571 if (enab->dten_error != 0) { 10572 /* 10573 * If we get an error half-way through enabling the 10574 * probes, we kick out -- perhaps with some number of 10575 * them enabled. Leaving enabled probes enabled may 10576 * be slightly confusing for user-level, but we expect 10577 * that no one will attempt to actually drive on in 10578 * the face of such errors. If this is an anonymous 10579 * enabling (indicated with a NULL nmatched pointer), 10580 * we cmn_err() a message. We aren't expecting to 10581 * get such an error -- such as it can exist at all, 10582 * it would be a result of corrupted DOF in the driver 10583 * properties. 10584 */ 10585 if (nmatched == NULL) { 10586 cmn_err(CE_WARN, "dtrace_enabling_match() " 10587 "error on %p: %d", (void *)ep, 10588 enab->dten_error); 10589 } 10590 10591 return (enab->dten_error); 10592 } 10593 } 10594 10595 enab->dten_probegen = dtrace_probegen; 10596 if (nmatched != NULL) 10597 *nmatched = matched; 10598 10599 return (0); 10600 } 10601 10602 static void 10603 dtrace_enabling_matchall(void) 10604 { 10605 dtrace_enabling_t *enab; 10606 10607 mutex_enter(&cpu_lock); 10608 mutex_enter(&dtrace_lock); 10609 10610 /* 10611 * Because we can be called after dtrace_detach() has been called, we 10612 * cannot assert that there are retained enablings. We can safely 10613 * load from dtrace_retained, however: the taskq_destroy() at the 10614 * end of dtrace_detach() will block pending our completion. 
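 *
 * A minimal sketch of the locking discipline assumed here (it simply
 * mirrors the code below and the asserts in dtrace_enabling_match();
 * nothing new is being introduced):
 *
 *        mutex_enter(&cpu_lock);
 *        mutex_enter(&dtrace_lock);
 *        (void) dtrace_enabling_match(enab, NULL);
 *        mutex_exit(&dtrace_lock);
 *        mutex_exit(&cpu_lock);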
10615 */ 10616 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 10617 (void) dtrace_enabling_match(enab, NULL); 10618 10619 mutex_exit(&dtrace_lock); 10620 mutex_exit(&cpu_lock); 10621 } 10622 10623 static int 10624 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 10625 { 10626 dtrace_enabling_t *enab; 10627 int matched, total = 0, err; 10628 10629 ASSERT(MUTEX_HELD(&cpu_lock)); 10630 ASSERT(MUTEX_HELD(&dtrace_lock)); 10631 10632 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10633 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10634 10635 if (enab->dten_vstate->dtvs_state != state) 10636 continue; 10637 10638 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 10639 return (err); 10640 10641 total += matched; 10642 } 10643 10644 if (nmatched != NULL) 10645 *nmatched = total; 10646 10647 return (0); 10648 } 10649 10650 /* 10651 * If an enabling is to be enabled without having matched probes (that is, if 10652 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 10653 * enabling must be _primed_ by creating an ECB for every ECB description. 10654 * This must be done to assure that we know the number of speculations, the 10655 * number of aggregations, the minimum buffer size needed, etc. before we 10656 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 10657 * enabling any probes, we create ECBs for every ECB decription, but with a 10658 * NULL probe -- which is exactly what this function does. 10659 */ 10660 static void 10661 dtrace_enabling_prime(dtrace_state_t *state) 10662 { 10663 dtrace_enabling_t *enab; 10664 int i; 10665 10666 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10667 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10668 10669 if (enab->dten_vstate->dtvs_state != state) 10670 continue; 10671 10672 /* 10673 * We don't want to prime an enabling more than once, lest 10674 * we allow a malicious user to induce resource exhaustion. 10675 * (The ECBs that result from priming an enabling aren't 10676 * leaked -- but they also aren't deallocated until the 10677 * consumer state is destroyed.) 10678 */ 10679 if (enab->dten_primed) 10680 continue; 10681 10682 for (i = 0; i < enab->dten_ndesc; i++) { 10683 enab->dten_current = enab->dten_desc[i]; 10684 (void) dtrace_probe_enable(NULL, enab); 10685 } 10686 10687 enab->dten_primed = 1; 10688 } 10689 } 10690 10691 /* 10692 * Called to indicate that probes should be provided due to retained 10693 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 10694 * must take an initial lap through the enabling calling the dtps_provide() 10695 * entry point explicitly to allow for autocreated probes. 
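 *
 * Roughly, that initial lap has the following shape (this is only a
 * restatement of the loop below): for each retained enabling and each of
 * its ECB descriptions, dtrace_lock is dropped, the provider's
 * dtps_provide() entry point is called with the probe description, and
 * dtrace_lock is then reacquired.  The lock is dropped because the
 * provider may reenter the framework (e.g. via dtrace_probe_create())
 * to create any probes that the description implies.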
10696 */ 10697 static void 10698 dtrace_enabling_provide(dtrace_provider_t *prv) 10699 { 10700 int i, all = 0; 10701 dtrace_probedesc_t desc; 10702 10703 ASSERT(MUTEX_HELD(&dtrace_lock)); 10704 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 10705 10706 if (prv == NULL) { 10707 all = 1; 10708 prv = dtrace_provider; 10709 } 10710 10711 do { 10712 dtrace_enabling_t *enab = dtrace_retained; 10713 void *parg = prv->dtpv_arg; 10714 10715 for (; enab != NULL; enab = enab->dten_next) { 10716 for (i = 0; i < enab->dten_ndesc; i++) { 10717 desc = enab->dten_desc[i]->dted_probe; 10718 mutex_exit(&dtrace_lock); 10719 prv->dtpv_pops.dtps_provide(parg, &desc); 10720 mutex_enter(&dtrace_lock); 10721 } 10722 } 10723 } while (all && (prv = prv->dtpv_next) != NULL); 10724 10725 mutex_exit(&dtrace_lock); 10726 dtrace_probe_provide(NULL, all ? NULL : prv); 10727 mutex_enter(&dtrace_lock); 10728 } 10729 10730 /* 10731 * DTrace DOF Functions 10732 */ 10733 /*ARGSUSED*/ 10734 static void 10735 dtrace_dof_error(dof_hdr_t *dof, const char *str) 10736 { 10737 if (dtrace_err_verbose) 10738 cmn_err(CE_WARN, "failed to process DOF: %s", str); 10739 10740 #ifdef DTRACE_ERRDEBUG 10741 dtrace_errdebug(str); 10742 #endif 10743 } 10744 10745 /* 10746 * Create DOF out of a currently enabled state. Right now, we only create 10747 * DOF containing the run-time options -- but this could be expanded to create 10748 * complete DOF representing the enabled state. 10749 */ 10750 static dof_hdr_t * 10751 dtrace_dof_create(dtrace_state_t *state) 10752 { 10753 dof_hdr_t *dof; 10754 dof_sec_t *sec; 10755 dof_optdesc_t *opt; 10756 int i, len = sizeof (dof_hdr_t) + 10757 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 10758 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10759 10760 ASSERT(MUTEX_HELD(&dtrace_lock)); 10761 10762 dof = kmem_zalloc(len, KM_SLEEP); 10763 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 10764 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 10765 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 10766 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 10767 10768 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 10769 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 10770 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 10771 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 10772 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 10773 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 10774 10775 dof->dofh_flags = 0; 10776 dof->dofh_hdrsize = sizeof (dof_hdr_t); 10777 dof->dofh_secsize = sizeof (dof_sec_t); 10778 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 10779 dof->dofh_secoff = sizeof (dof_hdr_t); 10780 dof->dofh_loadsz = len; 10781 dof->dofh_filesz = len; 10782 dof->dofh_pad = 0; 10783 10784 /* 10785 * Fill in the option section header... 
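 *
 * For reference, the DOF constructed here has the following fixed layout
 * (this simply restates the 'len' computation above):
 *
 *        offset 0:
 *                dof_hdr_t
 *        offset sizeof (dof_hdr_t):
 *                dof_sec_t (DOF_SECT_OPTDESC)
 *        offset sizeof (dof_hdr_t) +
 *            roundup(sizeof (dof_sec_t), sizeof (uint64_t)):
 *                dof_optdesc_t[DTRACEOPT_MAX]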
10786 */ 10787 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 10788 sec->dofs_type = DOF_SECT_OPTDESC; 10789 sec->dofs_align = sizeof (uint64_t); 10790 sec->dofs_flags = DOF_SECF_LOAD; 10791 sec->dofs_entsize = sizeof (dof_optdesc_t); 10792 10793 opt = (dof_optdesc_t *)((uintptr_t)sec + 10794 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 10795 10796 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 10797 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10798 10799 for (i = 0; i < DTRACEOPT_MAX; i++) { 10800 opt[i].dofo_option = i; 10801 opt[i].dofo_strtab = DOF_SECIDX_NONE; 10802 opt[i].dofo_value = state->dts_options[i]; 10803 } 10804 10805 return (dof); 10806 } 10807 10808 static dof_hdr_t * 10809 dtrace_dof_copyin(uintptr_t uarg, int *errp) 10810 { 10811 dof_hdr_t hdr, *dof; 10812 10813 ASSERT(!MUTEX_HELD(&dtrace_lock)); 10814 10815 /* 10816 * First, we're going to copyin() the sizeof (dof_hdr_t). 10817 */ 10818 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 10819 dtrace_dof_error(NULL, "failed to copyin DOF header"); 10820 *errp = EFAULT; 10821 return (NULL); 10822 } 10823 10824 /* 10825 * Now we'll allocate the entire DOF and copy it in -- provided 10826 * that the length isn't outrageous. 10827 */ 10828 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 10829 dtrace_dof_error(&hdr, "load size exceeds maximum"); 10830 *errp = E2BIG; 10831 return (NULL); 10832 } 10833 10834 if (hdr.dofh_loadsz < sizeof (hdr)) { 10835 dtrace_dof_error(&hdr, "invalid load size"); 10836 *errp = EINVAL; 10837 return (NULL); 10838 } 10839 10840 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 10841 10842 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 10843 kmem_free(dof, hdr.dofh_loadsz); 10844 *errp = EFAULT; 10845 return (NULL); 10846 } 10847 10848 return (dof); 10849 } 10850 10851 static dof_hdr_t * 10852 dtrace_dof_property(const char *name) 10853 { 10854 uchar_t *buf; 10855 uint64_t loadsz; 10856 unsigned int len, i; 10857 dof_hdr_t *dof; 10858 10859 /* 10860 * Unfortunately, array of values in .conf files are always (and 10861 * only) interpreted to be integer arrays. We must read our DOF 10862 * as an integer array, and then squeeze it into a byte array. 10863 */ 10864 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 10865 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 10866 return (NULL); 10867 10868 for (i = 0; i < len; i++) 10869 buf[i] = (uchar_t)(((int *)buf)[i]); 10870 10871 if (len < sizeof (dof_hdr_t)) { 10872 ddi_prop_free(buf); 10873 dtrace_dof_error(NULL, "truncated header"); 10874 return (NULL); 10875 } 10876 10877 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 10878 ddi_prop_free(buf); 10879 dtrace_dof_error(NULL, "truncated DOF"); 10880 return (NULL); 10881 } 10882 10883 if (loadsz >= dtrace_dof_maxsize) { 10884 ddi_prop_free(buf); 10885 dtrace_dof_error(NULL, "oversized DOF"); 10886 return (NULL); 10887 } 10888 10889 dof = kmem_alloc(loadsz, KM_SLEEP); 10890 bcopy(buf, dof, loadsz); 10891 ddi_prop_free(buf); 10892 10893 return (dof); 10894 } 10895 10896 static void 10897 dtrace_dof_destroy(dof_hdr_t *dof) 10898 { 10899 kmem_free(dof, dof->dofh_loadsz); 10900 } 10901 10902 /* 10903 * Return the dof_sec_t pointer corresponding to a given section index. If the 10904 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 10905 * a type other than DOF_SECT_NONE is specified, the header is checked against 10906 * this type and NULL is returned if the types do not match. 
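 *
 * A typical use -- as in dtrace_dof_probedesc(), below -- looks like:
 *
 *        strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
 *
 *        if (strtab == NULL)
 *                return (NULL);    (dtrace_dof_error() already called)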
10907 */ 10908 static dof_sec_t * 10909 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 10910 { 10911 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 10912 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 10913 10914 if (i >= dof->dofh_secnum) { 10915 dtrace_dof_error(dof, "referenced section index is invalid"); 10916 return (NULL); 10917 } 10918 10919 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 10920 dtrace_dof_error(dof, "referenced section is not loadable"); 10921 return (NULL); 10922 } 10923 10924 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 10925 dtrace_dof_error(dof, "referenced section is the wrong type"); 10926 return (NULL); 10927 } 10928 10929 return (sec); 10930 } 10931 10932 static dtrace_probedesc_t * 10933 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 10934 { 10935 dof_probedesc_t *probe; 10936 dof_sec_t *strtab; 10937 uintptr_t daddr = (uintptr_t)dof; 10938 uintptr_t str; 10939 size_t size; 10940 10941 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 10942 dtrace_dof_error(dof, "invalid probe section"); 10943 return (NULL); 10944 } 10945 10946 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10947 dtrace_dof_error(dof, "bad alignment in probe description"); 10948 return (NULL); 10949 } 10950 10951 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10952 dtrace_dof_error(dof, "truncated probe description"); 10953 return (NULL); 10954 } 10955 10956 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10957 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10958 10959 if (strtab == NULL) 10960 return (NULL); 10961 10962 str = daddr + strtab->dofs_offset; 10963 size = strtab->dofs_size; 10964 10965 if (probe->dofp_provider >= strtab->dofs_size) { 10966 dtrace_dof_error(dof, "corrupt probe provider"); 10967 return (NULL); 10968 } 10969 10970 (void) strncpy(desc->dtpd_provider, 10971 (char *)(str + probe->dofp_provider), 10972 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10973 10974 if (probe->dofp_mod >= strtab->dofs_size) { 10975 dtrace_dof_error(dof, "corrupt probe module"); 10976 return (NULL); 10977 } 10978 10979 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10980 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10981 10982 if (probe->dofp_func >= strtab->dofs_size) { 10983 dtrace_dof_error(dof, "corrupt probe function"); 10984 return (NULL); 10985 } 10986 10987 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10988 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10989 10990 if (probe->dofp_name >= strtab->dofs_size) { 10991 dtrace_dof_error(dof, "corrupt probe name"); 10992 return (NULL); 10993 } 10994 10995 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10996 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10997 10998 return (desc); 10999 } 11000 11001 static dtrace_difo_t * 11002 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11003 cred_t *cr) 11004 { 11005 dtrace_difo_t *dp; 11006 size_t ttl = 0; 11007 dof_difohdr_t *dofd; 11008 uintptr_t daddr = (uintptr_t)dof; 11009 size_t max = dtrace_difo_maxsize; 11010 int i, l, n; 11011 11012 static const struct { 11013 int section; 11014 int bufoffs; 11015 int lenoffs; 11016 int entsize; 11017 int align; 11018 const char *msg; 11019 } difo[] = { 11020 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11021 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11022 sizeof (dif_instr_t), "multiple DIF sections" }, 
11023 11024 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11025 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11026 sizeof (uint64_t), "multiple integer tables" }, 11027 11028 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11029 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11030 sizeof (char), "multiple string tables" }, 11031 11032 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11033 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11034 sizeof (uint_t), "multiple variable tables" }, 11035 11036 { DOF_SECT_NONE, 0, 0, 0, NULL } 11037 }; 11038 11039 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11040 dtrace_dof_error(dof, "invalid DIFO header section"); 11041 return (NULL); 11042 } 11043 11044 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11045 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11046 return (NULL); 11047 } 11048 11049 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11050 sec->dofs_size % sizeof (dof_secidx_t)) { 11051 dtrace_dof_error(dof, "bad size in DIFO header"); 11052 return (NULL); 11053 } 11054 11055 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11056 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11057 11058 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11059 dp->dtdo_rtype = dofd->dofd_rtype; 11060 11061 for (l = 0; l < n; l++) { 11062 dof_sec_t *subsec; 11063 void **bufp; 11064 uint32_t *lenp; 11065 11066 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11067 dofd->dofd_links[l])) == NULL) 11068 goto err; /* invalid section link */ 11069 11070 if (ttl + subsec->dofs_size > max) { 11071 dtrace_dof_error(dof, "exceeds maximum size"); 11072 goto err; 11073 } 11074 11075 ttl += subsec->dofs_size; 11076 11077 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11078 if (subsec->dofs_type != difo[i].section) 11079 continue; 11080 11081 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11082 dtrace_dof_error(dof, "section not loaded"); 11083 goto err; 11084 } 11085 11086 if (subsec->dofs_align != difo[i].align) { 11087 dtrace_dof_error(dof, "bad alignment"); 11088 goto err; 11089 } 11090 11091 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11092 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11093 11094 if (*bufp != NULL) { 11095 dtrace_dof_error(dof, difo[i].msg); 11096 goto err; 11097 } 11098 11099 if (difo[i].entsize != subsec->dofs_entsize) { 11100 dtrace_dof_error(dof, "entry size mismatch"); 11101 goto err; 11102 } 11103 11104 if (subsec->dofs_entsize != 0 && 11105 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11106 dtrace_dof_error(dof, "corrupt entry size"); 11107 goto err; 11108 } 11109 11110 *lenp = subsec->dofs_size; 11111 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11112 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11113 *bufp, subsec->dofs_size); 11114 11115 if (subsec->dofs_entsize != 0) 11116 *lenp /= subsec->dofs_entsize; 11117 11118 break; 11119 } 11120 11121 /* 11122 * If we encounter a loadable DIFO sub-section that is not 11123 * known to us, assume this is a broken program and fail. 11124 */ 11125 if (difo[i].section == DOF_SECT_NONE && 11126 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11127 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11128 goto err; 11129 } 11130 } 11131 11132 if (dp->dtdo_buf == NULL) { 11133 /* 11134 * We can't have a DIF object without DIF text. 
11135 */ 11136 dtrace_dof_error(dof, "missing DIF text"); 11137 goto err; 11138 } 11139 11140 /* 11141 * Before we validate the DIF object, run through the variable table 11142 * looking for the strings -- if any of their size are under, we'll set 11143 * their size to be the system-wide default string size. Note that 11144 * this should _not_ happen if the "strsize" option has been set -- 11145 * in this case, the compiler should have set the size to reflect the 11146 * setting of the option. 11147 */ 11148 for (i = 0; i < dp->dtdo_varlen; i++) { 11149 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 11150 dtrace_diftype_t *t = &v->dtdv_type; 11151 11152 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 11153 continue; 11154 11155 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 11156 t->dtdt_size = dtrace_strsize_default; 11157 } 11158 11159 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 11160 goto err; 11161 11162 dtrace_difo_init(dp, vstate); 11163 return (dp); 11164 11165 err: 11166 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 11167 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 11168 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 11169 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 11170 11171 kmem_free(dp, sizeof (dtrace_difo_t)); 11172 return (NULL); 11173 } 11174 11175 static dtrace_predicate_t * 11176 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11177 cred_t *cr) 11178 { 11179 dtrace_difo_t *dp; 11180 11181 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 11182 return (NULL); 11183 11184 return (dtrace_predicate_create(dp)); 11185 } 11186 11187 static dtrace_actdesc_t * 11188 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11189 cred_t *cr) 11190 { 11191 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 11192 dof_actdesc_t *desc; 11193 dof_sec_t *difosec; 11194 size_t offs; 11195 uintptr_t daddr = (uintptr_t)dof; 11196 uint64_t arg; 11197 dtrace_actkind_t kind; 11198 11199 if (sec->dofs_type != DOF_SECT_ACTDESC) { 11200 dtrace_dof_error(dof, "invalid action section"); 11201 return (NULL); 11202 } 11203 11204 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 11205 dtrace_dof_error(dof, "truncated action description"); 11206 return (NULL); 11207 } 11208 11209 if (sec->dofs_align != sizeof (uint64_t)) { 11210 dtrace_dof_error(dof, "bad alignment in action description"); 11211 return (NULL); 11212 } 11213 11214 if (sec->dofs_size < sec->dofs_entsize) { 11215 dtrace_dof_error(dof, "section entry size exceeds total size"); 11216 return (NULL); 11217 } 11218 11219 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 11220 dtrace_dof_error(dof, "bad entry size in action description"); 11221 return (NULL); 11222 } 11223 11224 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 11225 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 11226 return (NULL); 11227 } 11228 11229 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 11230 desc = (dof_actdesc_t *)(daddr + 11231 (uintptr_t)sec->dofs_offset + offs); 11232 kind = (dtrace_actkind_t)desc->dofa_kind; 11233 11234 if (DTRACEACT_ISPRINTFLIKE(kind) && 11235 (kind != DTRACEACT_PRINTA || 11236 desc->dofa_strtab != DOF_SECIDX_NONE)) { 11237 dof_sec_t *strtab; 11238 char *str, *fmt; 11239 uint64_t i; 11240 11241 /* 11242 * printf()-like actions must have a format string. 
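 *
 * As a purely illustrative example (the string and offset are invented):
 * if the referenced string table holds "\0cpu %d took %d ticks\n\0" and
 * dofa_arg is 1, the loop below finds the terminating NUL, 'i' becomes
 * the length of the format, and "cpu %d took %d ticks\n" (including its
 * NUL) is copied into a freshly allocated buffer whose address becomes
 * the action's argument.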
11243 */ 11244 if ((strtab = dtrace_dof_sect(dof, 11245 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 11246 goto err; 11247 11248 str = (char *)((uintptr_t)dof + 11249 (uintptr_t)strtab->dofs_offset); 11250 11251 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 11252 if (str[i] == '\0') 11253 break; 11254 } 11255 11256 if (i >= strtab->dofs_size) { 11257 dtrace_dof_error(dof, "bogus format string"); 11258 goto err; 11259 } 11260 11261 if (i == desc->dofa_arg) { 11262 dtrace_dof_error(dof, "empty format string"); 11263 goto err; 11264 } 11265 11266 i -= desc->dofa_arg; 11267 fmt = kmem_alloc(i + 1, KM_SLEEP); 11268 bcopy(&str[desc->dofa_arg], fmt, i + 1); 11269 arg = (uint64_t)(uintptr_t)fmt; 11270 } else { 11271 if (kind == DTRACEACT_PRINTA) { 11272 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 11273 arg = 0; 11274 } else { 11275 arg = desc->dofa_arg; 11276 } 11277 } 11278 11279 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 11280 desc->dofa_uarg, arg); 11281 11282 if (last != NULL) { 11283 last->dtad_next = act; 11284 } else { 11285 first = act; 11286 } 11287 11288 last = act; 11289 11290 if (desc->dofa_difo == DOF_SECIDX_NONE) 11291 continue; 11292 11293 if ((difosec = dtrace_dof_sect(dof, 11294 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 11295 goto err; 11296 11297 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 11298 11299 if (act->dtad_difo == NULL) 11300 goto err; 11301 } 11302 11303 ASSERT(first != NULL); 11304 return (first); 11305 11306 err: 11307 for (act = first; act != NULL; act = next) { 11308 next = act->dtad_next; 11309 dtrace_actdesc_release(act, vstate); 11310 } 11311 11312 return (NULL); 11313 } 11314 11315 static dtrace_ecbdesc_t * 11316 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11317 cred_t *cr) 11318 { 11319 dtrace_ecbdesc_t *ep; 11320 dof_ecbdesc_t *ecb; 11321 dtrace_probedesc_t *desc; 11322 dtrace_predicate_t *pred = NULL; 11323 11324 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 11325 dtrace_dof_error(dof, "truncated ECB description"); 11326 return (NULL); 11327 } 11328 11329 if (sec->dofs_align != sizeof (uint64_t)) { 11330 dtrace_dof_error(dof, "bad alignment in ECB description"); 11331 return (NULL); 11332 } 11333 11334 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 11335 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 11336 11337 if (sec == NULL) 11338 return (NULL); 11339 11340 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11341 ep->dted_uarg = ecb->dofe_uarg; 11342 desc = &ep->dted_probe; 11343 11344 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 11345 goto err; 11346 11347 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 11348 if ((sec = dtrace_dof_sect(dof, 11349 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 11350 goto err; 11351 11352 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 11353 goto err; 11354 11355 ep->dted_pred.dtpdd_predicate = pred; 11356 } 11357 11358 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 11359 if ((sec = dtrace_dof_sect(dof, 11360 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 11361 goto err; 11362 11363 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 11364 11365 if (ep->dted_action == NULL) 11366 goto err; 11367 } 11368 11369 return (ep); 11370 11371 err: 11372 if (pred != NULL) 11373 dtrace_predicate_release(pred, vstate); 11374 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11375 return (NULL); 11376 } 11377 11378 /* 11379 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 11380 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 11381 * site of any user SETX relocations to account for load object base address. 11382 * In the future, if we need other relocations, this function can be extended. 11383 */ 11384 static int 11385 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 11386 { 11387 uintptr_t daddr = (uintptr_t)dof; 11388 dof_relohdr_t *dofr = 11389 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11390 dof_sec_t *ss, *rs, *ts; 11391 dof_relodesc_t *r; 11392 uint_t i, n; 11393 11394 if (sec->dofs_size < sizeof (dof_relohdr_t) || 11395 sec->dofs_align != sizeof (dof_secidx_t)) { 11396 dtrace_dof_error(dof, "invalid relocation header"); 11397 return (-1); 11398 } 11399 11400 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 11401 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 11402 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 11403 11404 if (ss == NULL || rs == NULL || ts == NULL) 11405 return (-1); /* dtrace_dof_error() has been called already */ 11406 11407 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 11408 rs->dofs_align != sizeof (uint64_t)) { 11409 dtrace_dof_error(dof, "invalid relocation section"); 11410 return (-1); 11411 } 11412 11413 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 11414 n = rs->dofs_size / rs->dofs_entsize; 11415 11416 for (i = 0; i < n; i++) { 11417 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 11418 11419 switch (r->dofr_type) { 11420 case DOF_RELO_NONE: 11421 break; 11422 case DOF_RELO_SETX: 11423 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 11424 sizeof (uint64_t) > ts->dofs_size) { 11425 dtrace_dof_error(dof, "bad relocation offset"); 11426 return (-1); 11427 } 11428 11429 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 11430 dtrace_dof_error(dof, "misaligned setx relo"); 11431 return (-1); 11432 } 11433 11434 *(uint64_t *)taddr += ubase; 11435 break; 11436 default: 11437 dtrace_dof_error(dof, "invalid relocation type"); 11438 return (-1); 11439 } 11440 11441 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 11442 } 11443 11444 return (0); 11445 } 11446 11447 /* 11448 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 11449 * header: it should be at the front of a memory region that is at least 11450 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 11451 * size. It need not be validated in any other way. 11452 */ 11453 static int 11454 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 11455 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 11456 { 11457 uint64_t len = dof->dofh_loadsz, seclen; 11458 uintptr_t daddr = (uintptr_t)dof; 11459 dtrace_ecbdesc_t *ep; 11460 dtrace_enabling_t *enab; 11461 uint_t i; 11462 11463 ASSERT(MUTEX_HELD(&dtrace_lock)); 11464 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 11465 11466 /* 11467 * Check the DOF header identification bytes. In addition to checking 11468 * valid settings, we also verify that unused bits/bytes are zeroed so 11469 * we can use them later without fear of regressing existing binaries. 
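 *
 * For reference, the identification bytes examined below carry the
 * following information (each is validated in turn):
 *
 *        DOF_ID_MAG0 .. DOF_ID_MAG3    the DOF magic string
 *        DOF_ID_MODEL                  data model (ILP32 or LP64)
 *        DOF_ID_ENCODING               byte encoding
 *        DOF_ID_VERSION                DOF format version
 *        DOF_ID_DIFVERS                DIF instruction set version
 *        DOF_ID_DIFIREG                DIF integer register count
 *        DOF_ID_DIFTREG                DIF tuple register count
 *        DOF_ID_PAD and beyond         reserved; must be zero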
11470 */ 11471 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 11472 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 11473 dtrace_dof_error(dof, "DOF magic string mismatch"); 11474 return (-1); 11475 } 11476 11477 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 11478 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 11479 dtrace_dof_error(dof, "DOF has invalid data model"); 11480 return (-1); 11481 } 11482 11483 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 11484 dtrace_dof_error(dof, "DOF encoding mismatch"); 11485 return (-1); 11486 } 11487 11488 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 11489 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 11490 dtrace_dof_error(dof, "DOF version mismatch"); 11491 return (-1); 11492 } 11493 11494 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 11495 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 11496 return (-1); 11497 } 11498 11499 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 11500 dtrace_dof_error(dof, "DOF uses too many integer registers"); 11501 return (-1); 11502 } 11503 11504 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 11505 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 11506 return (-1); 11507 } 11508 11509 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 11510 if (dof->dofh_ident[i] != 0) { 11511 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 11512 return (-1); 11513 } 11514 } 11515 11516 if (dof->dofh_flags & ~DOF_FL_VALID) { 11517 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 11518 return (-1); 11519 } 11520 11521 if (dof->dofh_secsize == 0) { 11522 dtrace_dof_error(dof, "zero section header size"); 11523 return (-1); 11524 } 11525 11526 /* 11527 * Check that the section headers don't exceed the amount of DOF 11528 * data. Note that we cast the section size and number of sections 11529 * to uint64_t's to prevent possible overflow in the multiplication. 11530 */ 11531 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 11532 11533 if (dof->dofh_secoff > len || seclen > len || 11534 dof->dofh_secoff + seclen > len) { 11535 dtrace_dof_error(dof, "truncated section headers"); 11536 return (-1); 11537 } 11538 11539 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 11540 dtrace_dof_error(dof, "misaligned section headers"); 11541 return (-1); 11542 } 11543 11544 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 11545 dtrace_dof_error(dof, "misaligned section size"); 11546 return (-1); 11547 } 11548 11549 /* 11550 * Take an initial pass through the section headers to be sure that 11551 * the headers don't have stray offsets. If the 'noprobes' flag is 11552 * set, do not permit sections relating to providers, probes, or args. 
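 *
 * Note that the alignment check below is the usual power-of-two test:
 * for a valid alignment exactly one bit is set, so (align & (align - 1))
 * is zero -- e.g. 8 & 7 == 0 -- whereas a bogus alignment such as 6
 * fails the test (6 & 5 == 4).  The offset and size checks compare each
 * value and their sum against 'len' so that an overflowing sum cannot
 * slip past the bounds check.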
11553 */ 11554 for (i = 0; i < dof->dofh_secnum; i++) { 11555 dof_sec_t *sec = (dof_sec_t *)(daddr + 11556 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11557 11558 if (noprobes) { 11559 switch (sec->dofs_type) { 11560 case DOF_SECT_PROVIDER: 11561 case DOF_SECT_PROBES: 11562 case DOF_SECT_PRARGS: 11563 case DOF_SECT_PROFFS: 11564 dtrace_dof_error(dof, "illegal sections " 11565 "for enabling"); 11566 return (-1); 11567 } 11568 } 11569 11570 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11571 continue; /* just ignore non-loadable sections */ 11572 11573 if (sec->dofs_align & (sec->dofs_align - 1)) { 11574 dtrace_dof_error(dof, "bad section alignment"); 11575 return (-1); 11576 } 11577 11578 if (sec->dofs_offset & (sec->dofs_align - 1)) { 11579 dtrace_dof_error(dof, "misaligned section"); 11580 return (-1); 11581 } 11582 11583 if (sec->dofs_offset > len || sec->dofs_size > len || 11584 sec->dofs_offset + sec->dofs_size > len) { 11585 dtrace_dof_error(dof, "corrupt section header"); 11586 return (-1); 11587 } 11588 11589 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 11590 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 11591 dtrace_dof_error(dof, "non-terminating string table"); 11592 return (-1); 11593 } 11594 } 11595 11596 /* 11597 * Take a second pass through the sections and locate and perform any 11598 * relocations that are present. We do this after the first pass to 11599 * be sure that all sections have had their headers validated. 11600 */ 11601 for (i = 0; i < dof->dofh_secnum; i++) { 11602 dof_sec_t *sec = (dof_sec_t *)(daddr + 11603 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11604 11605 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11606 continue; /* skip sections that are not loadable */ 11607 11608 switch (sec->dofs_type) { 11609 case DOF_SECT_URELHDR: 11610 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 11611 return (-1); 11612 break; 11613 } 11614 } 11615 11616 if ((enab = *enabp) == NULL) 11617 enab = *enabp = dtrace_enabling_create(vstate); 11618 11619 for (i = 0; i < dof->dofh_secnum; i++) { 11620 dof_sec_t *sec = (dof_sec_t *)(daddr + 11621 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11622 11623 if (sec->dofs_type != DOF_SECT_ECBDESC) 11624 continue; 11625 11626 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 11627 dtrace_enabling_destroy(enab); 11628 *enabp = NULL; 11629 return (-1); 11630 } 11631 11632 dtrace_enabling_add(enab, ep); 11633 } 11634 11635 return (0); 11636 } 11637 11638 /* 11639 * Process DOF for any options. This routine assumes that the DOF has been 11640 * at least processed by dtrace_dof_slurp(). 
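 *
 * A minimal, illustrative calling sequence (error handling elided; the
 * variable names are not part of any interface):
 *
 *        if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_FALSE) != 0)
 *                ... reject the DOF ...
 *
 *        if ((rval = dtrace_dof_options(dof, state)) != 0)
 *                ... reject the offending option ...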
11641 */ 11642 static int 11643 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 11644 { 11645 int i, rval; 11646 uint32_t entsize; 11647 size_t offs; 11648 dof_optdesc_t *desc; 11649 11650 for (i = 0; i < dof->dofh_secnum; i++) { 11651 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 11652 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11653 11654 if (sec->dofs_type != DOF_SECT_OPTDESC) 11655 continue; 11656 11657 if (sec->dofs_align != sizeof (uint64_t)) { 11658 dtrace_dof_error(dof, "bad alignment in " 11659 "option description"); 11660 return (EINVAL); 11661 } 11662 11663 if ((entsize = sec->dofs_entsize) == 0) { 11664 dtrace_dof_error(dof, "zeroed option entry size"); 11665 return (EINVAL); 11666 } 11667 11668 if (entsize < sizeof (dof_optdesc_t)) { 11669 dtrace_dof_error(dof, "bad option entry size"); 11670 return (EINVAL); 11671 } 11672 11673 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 11674 desc = (dof_optdesc_t *)((uintptr_t)dof + 11675 (uintptr_t)sec->dofs_offset + offs); 11676 11677 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 11678 dtrace_dof_error(dof, "non-zero option string"); 11679 return (EINVAL); 11680 } 11681 11682 if (desc->dofo_value == DTRACEOPT_UNSET) { 11683 dtrace_dof_error(dof, "unset option"); 11684 return (EINVAL); 11685 } 11686 11687 if ((rval = dtrace_state_option(state, 11688 desc->dofo_option, desc->dofo_value)) != 0) { 11689 dtrace_dof_error(dof, "rejected option"); 11690 return (rval); 11691 } 11692 } 11693 } 11694 11695 return (0); 11696 } 11697 11698 /* 11699 * DTrace Consumer State Functions 11700 */ 11701 int 11702 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 11703 { 11704 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 11705 void *base; 11706 uintptr_t limit; 11707 dtrace_dynvar_t *dvar, *next, *start; 11708 int i; 11709 11710 ASSERT(MUTEX_HELD(&dtrace_lock)); 11711 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 11712 11713 bzero(dstate, sizeof (dtrace_dstate_t)); 11714 11715 if ((dstate->dtds_chunksize = chunksize) == 0) 11716 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 11717 11718 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 11719 size = min; 11720 11721 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 11722 return (ENOMEM); 11723 11724 dstate->dtds_size = size; 11725 dstate->dtds_base = base; 11726 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 11727 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 11728 11729 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 11730 11731 if (hashsize != 1 && (hashsize & 1)) 11732 hashsize--; 11733 11734 dstate->dtds_hashsize = hashsize; 11735 dstate->dtds_hash = dstate->dtds_base; 11736 11737 /* 11738 * Set all of our hash buckets to point to the single sink, and (if 11739 * it hasn't already been set), set the sink's hash value to be the 11740 * sink sentinel value. The sink is needed for dynamic variable 11741 * lookups to know that they have iterated over an entire, valid hash 11742 * chain. 11743 */ 11744 for (i = 0; i < hashsize; i++) 11745 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 11746 11747 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 11748 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 11749 11750 /* 11751 * Determine number of active CPUs. Divide free list evenly among 11752 * active CPUs. 
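 *
 * As a purely numeric illustration (all figures invented): if the space
 * remaining after the hash table is 1,000,000 bytes, NCPU is 4 and the
 * chunk size is 256 bytes, then maxper is 1,000,000 / 4 rounded down to
 * a chunk multiple -- 249,856 bytes, or 976 chunks -- for each of the
 * first three CPUs, with the final CPU taking whatever remains.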
11753 */ 11754 start = (dtrace_dynvar_t *) 11755 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 11756 limit = (uintptr_t)base + size; 11757 11758 maxper = (limit - (uintptr_t)start) / NCPU; 11759 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 11760 11761 for (i = 0; i < NCPU; i++) { 11762 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 11763 11764 /* 11765 * If we don't even have enough chunks to make it once through 11766 * NCPUs, we're just going to allocate everything to the first 11767 * CPU. And if we're on the last CPU, we're going to allocate 11768 * whatever is left over. In either case, we set the limit to 11769 * be the limit of the dynamic variable space. 11770 */ 11771 if (maxper == 0 || i == NCPU - 1) { 11772 limit = (uintptr_t)base + size; 11773 start = NULL; 11774 } else { 11775 limit = (uintptr_t)start + maxper; 11776 start = (dtrace_dynvar_t *)limit; 11777 } 11778 11779 ASSERT(limit <= (uintptr_t)base + size); 11780 11781 for (;;) { 11782 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 11783 dstate->dtds_chunksize); 11784 11785 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 11786 break; 11787 11788 dvar->dtdv_next = next; 11789 dvar = next; 11790 } 11791 11792 if (maxper == 0) 11793 break; 11794 } 11795 11796 return (0); 11797 } 11798 11799 void 11800 dtrace_dstate_fini(dtrace_dstate_t *dstate) 11801 { 11802 ASSERT(MUTEX_HELD(&cpu_lock)); 11803 11804 if (dstate->dtds_base == NULL) 11805 return; 11806 11807 kmem_free(dstate->dtds_base, dstate->dtds_size); 11808 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 11809 } 11810 11811 static void 11812 dtrace_vstate_fini(dtrace_vstate_t *vstate) 11813 { 11814 /* 11815 * Logical XOR, where are you? 11816 */ 11817 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 11818 11819 if (vstate->dtvs_nglobals > 0) { 11820 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 11821 sizeof (dtrace_statvar_t *)); 11822 } 11823 11824 if (vstate->dtvs_ntlocals > 0) { 11825 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 11826 sizeof (dtrace_difv_t)); 11827 } 11828 11829 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 11830 11831 if (vstate->dtvs_nlocals > 0) { 11832 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 11833 sizeof (dtrace_statvar_t *)); 11834 } 11835 } 11836 11837 static void 11838 dtrace_state_clean(dtrace_state_t *state) 11839 { 11840 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 11841 return; 11842 11843 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 11844 dtrace_speculation_clean(state); 11845 } 11846 11847 static void 11848 dtrace_state_deadman(dtrace_state_t *state) 11849 { 11850 hrtime_t now; 11851 11852 dtrace_sync(); 11853 11854 now = dtrace_gethrtime(); 11855 11856 if (state != dtrace_anon.dta_state && 11857 now - state->dts_laststatus >= dtrace_deadman_user) 11858 return; 11859 11860 /* 11861 * We must be sure that dts_alive never appears to be less than the 11862 * value upon entry to dtrace_state_deadman(), and because we lack a 11863 * dtrace_cas64(), we cannot store to it atomically. We thus instead 11864 * store INT64_MAX to it, followed by a memory barrier, followed by 11865 * the new value. This assures that dts_alive never appears to be 11866 * less than its true value, regardless of the order in which the 11867 * stores to the underlying storage are issued. 
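 *
 * To make that concrete: because the 64-bit store may be issued as two
 * halves (and because stores may be reordered), storing the new value
 * directly could let an observer see a mixture of old and new halves
 * that is smaller than either value.  With the sequence below, an
 * observer can see only the old value, all-ones (INT64_MAX), the new
 * value, or a mixture no smaller than the old value -- so dts_alive
 * never appears to move backwards.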
11868 */ 11869 state->dts_alive = INT64_MAX; 11870 dtrace_membar_producer(); 11871 state->dts_alive = now; 11872 } 11873 11874 dtrace_state_t * 11875 dtrace_state_create(dev_t *devp, cred_t *cr) 11876 { 11877 minor_t minor; 11878 major_t major; 11879 char c[30]; 11880 dtrace_state_t *state; 11881 dtrace_optval_t *opt; 11882 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 11883 11884 ASSERT(MUTEX_HELD(&dtrace_lock)); 11885 ASSERT(MUTEX_HELD(&cpu_lock)); 11886 11887 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 11888 VM_BESTFIT | VM_SLEEP); 11889 11890 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 11891 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11892 return (NULL); 11893 } 11894 11895 state = ddi_get_soft_state(dtrace_softstate, minor); 11896 state->dts_epid = DTRACE_EPIDNONE + 1; 11897 11898 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 11899 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 11900 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 11901 11902 if (devp != NULL) { 11903 major = getemajor(*devp); 11904 } else { 11905 major = ddi_driver_major(dtrace_devi); 11906 } 11907 11908 state->dts_dev = makedevice(major, minor); 11909 11910 if (devp != NULL) 11911 *devp = state->dts_dev; 11912 11913 /* 11914 * We allocate NCPU buffers. On the one hand, this can be quite 11915 * a bit of memory per instance (nearly 36K on a Starcat). On the 11916 * other hand, it saves an additional memory reference in the probe 11917 * path. 11918 */ 11919 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 11920 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 11921 state->dts_cleaner = CYCLIC_NONE; 11922 state->dts_deadman = CYCLIC_NONE; 11923 state->dts_vstate.dtvs_state = state; 11924 11925 for (i = 0; i < DTRACEOPT_MAX; i++) 11926 state->dts_options[i] = DTRACEOPT_UNSET; 11927 11928 /* 11929 * Set the default options. 11930 */ 11931 opt = state->dts_options; 11932 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 11933 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 11934 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 11935 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 11936 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 11937 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 11938 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 11939 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 11940 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 11941 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 11942 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 11943 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 11944 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 11945 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 11946 11947 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 11948 11949 /* 11950 * Depending on the user credentials, we set flag bits which alter probe 11951 * visibility or the amount of destructiveness allowed. In the case of 11952 * actual anonymous tracing, or the possession of all privileges, all of 11953 * the normal checks are bypassed. 11954 */ 11955 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 11956 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 11957 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 11958 } else { 11959 /* 11960 * Set up the credentials for this instantiation. 
We take a 11961 * hold on the credential to prevent it from disappearing on 11962 * us; this in turn prevents the zone_t referenced by this 11963 * credential from disappearing. This means that we can 11964 * examine the credential and the zone from probe context. 11965 */ 11966 crhold(cr); 11967 state->dts_cred.dcr_cred = cr; 11968 11969 /* 11970 * CRA_PROC means "we have *some* privilege for dtrace" and 11971 * unlocks the use of variables like pid, zonename, etc. 11972 */ 11973 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 11974 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11975 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11976 } 11977 11978 /* 11979 * dtrace_user allows use of syscall and profile providers. 11980 * If the user also has proc_owner and/or proc_zone, we 11981 * extend the scope to include additional visibility and 11982 * destructive power. 11983 */ 11984 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 11985 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11986 state->dts_cred.dcr_visible |= 11987 DTRACE_CRV_ALLPROC; 11988 11989 state->dts_cred.dcr_action |= 11990 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11991 } 11992 11993 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 11994 state->dts_cred.dcr_visible |= 11995 DTRACE_CRV_ALLZONE; 11996 11997 state->dts_cred.dcr_action |= 11998 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11999 } 12000 12001 /* 12002 * If we have all privs in whatever zone this is, 12003 * we can do destructive things to processes which 12004 * have altered credentials. 12005 */ 12006 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12007 cr->cr_zone->zone_privset)) { 12008 state->dts_cred.dcr_action |= 12009 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12010 } 12011 } 12012 12013 /* 12014 * Holding the dtrace_kernel privilege also implies that 12015 * the user has the dtrace_user privilege from a visibility 12016 * perspective. But without further privileges, some 12017 * destructive actions are not available. 12018 */ 12019 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12020 /* 12021 * Make all probes in all zones visible. However, 12022 * this doesn't mean that all actions become available 12023 * to all zones. 12024 */ 12025 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12026 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12027 12028 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12029 DTRACE_CRA_PROC; 12030 /* 12031 * Holding proc_owner means that destructive actions 12032 * for *this* zone are allowed. 12033 */ 12034 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12035 state->dts_cred.dcr_action |= 12036 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12037 12038 /* 12039 * Holding proc_zone means that destructive actions 12040 * for this user/group ID in all zones is allowed. 12041 */ 12042 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12043 state->dts_cred.dcr_action |= 12044 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12045 12046 /* 12047 * If we have all privs in whatever zone this is, 12048 * we can do destructive things to processes which 12049 * have altered credentials. 12050 */ 12051 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12052 cr->cr_zone->zone_privset)) { 12053 state->dts_cred.dcr_action |= 12054 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12055 } 12056 } 12057 12058 /* 12059 * Holding the dtrace_proc privilege gives control over fasttrap 12060 * and pid providers. We need to grant wider destructive 12061 * privileges in the event that the user has proc_owner and/or 12062 * proc_zone. 
12063 */ 12064 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12065 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12066 state->dts_cred.dcr_action |= 12067 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12068 12069 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12070 state->dts_cred.dcr_action |= 12071 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12072 } 12073 } 12074 12075 return (state); 12076 } 12077 12078 static int 12079 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 12080 { 12081 dtrace_optval_t *opt = state->dts_options, size; 12082 processorid_t cpu; 12083 int flags = 0, rval; 12084 12085 ASSERT(MUTEX_HELD(&dtrace_lock)); 12086 ASSERT(MUTEX_HELD(&cpu_lock)); 12087 ASSERT(which < DTRACEOPT_MAX); 12088 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 12089 (state == dtrace_anon.dta_state && 12090 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 12091 12092 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 12093 return (0); 12094 12095 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 12096 cpu = opt[DTRACEOPT_CPU]; 12097 12098 if (which == DTRACEOPT_SPECSIZE) 12099 flags |= DTRACEBUF_NOSWITCH; 12100 12101 if (which == DTRACEOPT_BUFSIZE) { 12102 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 12103 flags |= DTRACEBUF_RING; 12104 12105 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 12106 flags |= DTRACEBUF_FILL; 12107 12108 if (state != dtrace_anon.dta_state || 12109 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12110 flags |= DTRACEBUF_INACTIVE; 12111 } 12112 12113 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 12114 /* 12115 * The size must be 8-byte aligned. If the size is not 8-byte 12116 * aligned, drop it down by the difference. 12117 */ 12118 if (size & (sizeof (uint64_t) - 1)) 12119 size -= size & (sizeof (uint64_t) - 1); 12120 12121 if (size < state->dts_reserve) { 12122 /* 12123 * Buffers always must be large enough to accommodate 12124 * their prereserved space. We return E2BIG instead 12125 * of ENOMEM in this case to allow for user-level 12126 * software to differentiate the cases. 12127 */ 12128 return (E2BIG); 12129 } 12130 12131 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 12132 12133 if (rval != ENOMEM) { 12134 opt[which] = size; 12135 return (rval); 12136 } 12137 12138 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12139 return (rval); 12140 } 12141 12142 return (ENOMEM); 12143 } 12144 12145 static int 12146 dtrace_state_buffers(dtrace_state_t *state) 12147 { 12148 dtrace_speculation_t *spec = state->dts_speculations; 12149 int rval, i; 12150 12151 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 12152 DTRACEOPT_BUFSIZE)) != 0) 12153 return (rval); 12154 12155 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 12156 DTRACEOPT_AGGSIZE)) != 0) 12157 return (rval); 12158 12159 for (i = 0; i < state->dts_nspeculations; i++) { 12160 if ((rval = dtrace_state_buffer(state, 12161 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 12162 return (rval); 12163 } 12164 12165 return (0); 12166 } 12167 12168 static void 12169 dtrace_state_prereserve(dtrace_state_t *state) 12170 { 12171 dtrace_ecb_t *ecb; 12172 dtrace_probe_t *probe; 12173 12174 state->dts_reserve = 0; 12175 12176 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 12177 return; 12178 12179 /* 12180 * If our buffer policy is a "fill" buffer policy, we need to set the 12181 * prereserved space to be the space required by the END probes. 
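 *
 * As an illustrative example (hypothetical sizes, not taken from any
 * real enabling): if this state owns two ECBs on the END probe, one
 * needing 32 bytes at 8-byte alignment and one needing 48 bytes at
 * 8-byte alignment, the loop below computes
 *
 *	dts_reserve = (32 + 8) + (48 + 8) = 96 bytes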
12182 */ 12183 probe = dtrace_probes[dtrace_probeid_end - 1]; 12184 ASSERT(probe != NULL); 12185 12186 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 12187 if (ecb->dte_state != state) 12188 continue; 12189 12190 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 12191 } 12192 } 12193 12194 static int 12195 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 12196 { 12197 dtrace_optval_t *opt = state->dts_options, sz, nspec; 12198 dtrace_speculation_t *spec; 12199 dtrace_buffer_t *buf; 12200 cyc_handler_t hdlr; 12201 cyc_time_t when; 12202 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12203 dtrace_icookie_t cookie; 12204 12205 mutex_enter(&cpu_lock); 12206 mutex_enter(&dtrace_lock); 12207 12208 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 12209 rval = EBUSY; 12210 goto out; 12211 } 12212 12213 /* 12214 * Before we can perform any checks, we must prime all of the 12215 * retained enablings that correspond to this state. 12216 */ 12217 dtrace_enabling_prime(state); 12218 12219 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 12220 rval = EACCES; 12221 goto out; 12222 } 12223 12224 dtrace_state_prereserve(state); 12225 12226 /* 12227 * Now we want to do is try to allocate our speculations. 12228 * We do not automatically resize the number of speculations; if 12229 * this fails, we will fail the operation. 12230 */ 12231 nspec = opt[DTRACEOPT_NSPEC]; 12232 ASSERT(nspec != DTRACEOPT_UNSET); 12233 12234 if (nspec > INT_MAX) { 12235 rval = ENOMEM; 12236 goto out; 12237 } 12238 12239 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 12240 12241 if (spec == NULL) { 12242 rval = ENOMEM; 12243 goto out; 12244 } 12245 12246 state->dts_speculations = spec; 12247 state->dts_nspeculations = (int)nspec; 12248 12249 for (i = 0; i < nspec; i++) { 12250 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 12251 rval = ENOMEM; 12252 goto err; 12253 } 12254 12255 spec[i].dtsp_buffer = buf; 12256 } 12257 12258 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 12259 if (dtrace_anon.dta_state == NULL) { 12260 rval = ENOENT; 12261 goto out; 12262 } 12263 12264 if (state->dts_necbs != 0) { 12265 rval = EALREADY; 12266 goto out; 12267 } 12268 12269 state->dts_anon = dtrace_anon_grab(); 12270 ASSERT(state->dts_anon != NULL); 12271 state = state->dts_anon; 12272 12273 /* 12274 * We want "grabanon" to be set in the grabbed state, so we'll 12275 * copy that option value from the grabbing state into the 12276 * grabbed state. 12277 */ 12278 state->dts_options[DTRACEOPT_GRABANON] = 12279 opt[DTRACEOPT_GRABANON]; 12280 12281 *cpu = dtrace_anon.dta_beganon; 12282 12283 /* 12284 * If the anonymous state is active (as it almost certainly 12285 * is if the anonymous enabling ultimately matched anything), 12286 * we don't allow any further option processing -- but we 12287 * don't return failure. 12288 */ 12289 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12290 goto out; 12291 } 12292 12293 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 12294 opt[DTRACEOPT_AGGSIZE] != 0) { 12295 if (state->dts_aggregations == NULL) { 12296 /* 12297 * We're not going to create an aggregation buffer 12298 * because we don't have any ECBs that contain 12299 * aggregations -- set this option to 0. 12300 */ 12301 opt[DTRACEOPT_AGGSIZE] = 0; 12302 } else { 12303 /* 12304 * If we have an aggregation buffer, we must also have 12305 * a buffer to use as scratch. 
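 *
 * For example (hypothetical values): if the ECBs require a dts_needed
 * of 4K and the consumer left bufsize unset, the code below raises
 * opt[DTRACEOPT_BUFSIZE] to 4K so that the principal buffer can serve
 * as scratch space for aggregation processing.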
12306 */ 12307 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 12308 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 12309 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 12310 } 12311 } 12312 } 12313 12314 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 12315 opt[DTRACEOPT_SPECSIZE] != 0) { 12316 if (!state->dts_speculates) { 12317 /* 12318 * We're not going to create speculation buffers 12319 * because we don't have any ECBs that actually 12320 * speculate -- set the speculation size to 0. 12321 */ 12322 opt[DTRACEOPT_SPECSIZE] = 0; 12323 } 12324 } 12325 12326 /* 12327 * The bare minimum size for any buffer that we're actually going to 12328 * do anything to is sizeof (uint64_t). 12329 */ 12330 sz = sizeof (uint64_t); 12331 12332 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 12333 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 12334 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 12335 /* 12336 * A buffer size has been explicitly set to 0 (or to a size 12337 * that will be adjusted to 0) and we need the space -- we 12338 * need to return failure. We return ENOSPC to differentiate 12339 * it from failing to allocate a buffer due to failure to meet 12340 * the reserve (for which we return E2BIG). 12341 */ 12342 rval = ENOSPC; 12343 goto out; 12344 } 12345 12346 if ((rval = dtrace_state_buffers(state)) != 0) 12347 goto err; 12348 12349 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 12350 sz = dtrace_dstate_defsize; 12351 12352 do { 12353 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 12354 12355 if (rval == 0) 12356 break; 12357 12358 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12359 goto err; 12360 } while (sz >>= 1); 12361 12362 opt[DTRACEOPT_DYNVARSIZE] = sz; 12363 12364 if (rval != 0) 12365 goto err; 12366 12367 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 12368 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 12369 12370 if (opt[DTRACEOPT_CLEANRATE] == 0) 12371 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12372 12373 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 12374 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 12375 12376 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 12377 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12378 12379 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 12380 hdlr.cyh_arg = state; 12381 hdlr.cyh_level = CY_LOW_LEVEL; 12382 12383 when.cyt_when = 0; 12384 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 12385 12386 state->dts_cleaner = cyclic_add(&hdlr, &when); 12387 12388 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 12389 hdlr.cyh_arg = state; 12390 hdlr.cyh_level = CY_LOW_LEVEL; 12391 12392 when.cyt_when = 0; 12393 when.cyt_interval = dtrace_deadman_interval; 12394 12395 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 12396 state->dts_deadman = cyclic_add(&hdlr, &when); 12397 12398 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 12399 12400 /* 12401 * Now it's time to actually fire the BEGIN probe. We need to disable 12402 * interrupts here both to record the CPU on which we fired the BEGIN 12403 * probe (the data from this CPU will be processed first at user 12404 * level) and to manually activate the buffer for this CPU. 
12405 */ 12406 cookie = dtrace_interrupt_disable(); 12407 *cpu = CPU->cpu_id; 12408 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 12409 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12410 12411 dtrace_probe(dtrace_probeid_begin, 12412 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12413 dtrace_interrupt_enable(cookie); 12414 /* 12415 * We may have had an exit action from a BEGIN probe; only change our 12416 * state to ACTIVE if we're still in WARMUP. 12417 */ 12418 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 12419 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 12420 12421 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 12422 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 12423 12424 /* 12425 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 12426 * want each CPU to transition its principal buffer out of the 12427 * INACTIVE state. Doing this assures that no CPU will suddenly begin 12428 * processing an ECB halfway down a probe's ECB chain; all CPUs will 12429 * atomically transition from processing none of a state's ECBs to 12430 * processing all of them. 12431 */ 12432 dtrace_xcall(DTRACE_CPUALL, 12433 (dtrace_xcall_t)dtrace_buffer_activate, state); 12434 goto out; 12435 12436 err: 12437 dtrace_buffer_free(state->dts_buffer); 12438 dtrace_buffer_free(state->dts_aggbuffer); 12439 12440 if ((nspec = state->dts_nspeculations) == 0) { 12441 ASSERT(state->dts_speculations == NULL); 12442 goto out; 12443 } 12444 12445 spec = state->dts_speculations; 12446 ASSERT(spec != NULL); 12447 12448 for (i = 0; i < state->dts_nspeculations; i++) { 12449 if ((buf = spec[i].dtsp_buffer) == NULL) 12450 break; 12451 12452 dtrace_buffer_free(buf); 12453 kmem_free(buf, bufsize); 12454 } 12455 12456 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12457 state->dts_nspeculations = 0; 12458 state->dts_speculations = NULL; 12459 12460 out: 12461 mutex_exit(&dtrace_lock); 12462 mutex_exit(&cpu_lock); 12463 12464 return (rval); 12465 } 12466 12467 static int 12468 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 12469 { 12470 dtrace_icookie_t cookie; 12471 12472 ASSERT(MUTEX_HELD(&dtrace_lock)); 12473 12474 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 12475 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 12476 return (EINVAL); 12477 12478 /* 12479 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 12480 * to be sure that every CPU has seen it. See below for the details 12481 * on why this is done. 12482 */ 12483 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 12484 dtrace_sync(); 12485 12486 /* 12487 * By this point, it is impossible for any CPU to be still processing 12488 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 12489 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 12490 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 12491 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 12492 * iff we're in the END probe. 12493 */ 12494 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 12495 dtrace_sync(); 12496 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 12497 12498 /* 12499 * Finally, we can release the reserve and call the END probe. We 12500 * disable interrupts across calling the END probe to allow us to 12501 * return the CPU on which we actually called the END probe. This 12502 * allows user-land to be sure that this CPU's principal buffer is 12503 * processed last. 
12504 */ 12505 state->dts_reserve = 0; 12506 12507 cookie = dtrace_interrupt_disable(); 12508 *cpu = CPU->cpu_id; 12509 dtrace_probe(dtrace_probeid_end, 12510 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12511 dtrace_interrupt_enable(cookie); 12512 12513 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 12514 dtrace_sync(); 12515 12516 return (0); 12517 } 12518 12519 static int 12520 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 12521 dtrace_optval_t val) 12522 { 12523 ASSERT(MUTEX_HELD(&dtrace_lock)); 12524 12525 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12526 return (EBUSY); 12527 12528 if (option >= DTRACEOPT_MAX) 12529 return (EINVAL); 12530 12531 if (option != DTRACEOPT_CPU && val < 0) 12532 return (EINVAL); 12533 12534 switch (option) { 12535 case DTRACEOPT_DESTRUCTIVE: 12536 if (dtrace_destructive_disallow) 12537 return (EACCES); 12538 12539 state->dts_cred.dcr_destructive = 1; 12540 break; 12541 12542 case DTRACEOPT_BUFSIZE: 12543 case DTRACEOPT_DYNVARSIZE: 12544 case DTRACEOPT_AGGSIZE: 12545 case DTRACEOPT_SPECSIZE: 12546 case DTRACEOPT_STRSIZE: 12547 if (val < 0) 12548 return (EINVAL); 12549 12550 if (val >= LONG_MAX) { 12551 /* 12552 * If this is an otherwise negative value, set it to 12553 * the highest multiple of 128m less than LONG_MAX. 12554 * Technically, we're adjusting the size without 12555 * regard to the buffer resizing policy, but in fact, 12556 * this has no effect -- if we set the buffer size to 12557 * ~LONG_MAX and the buffer policy is ultimately set to 12558 * be "manual", the buffer allocation is guaranteed to 12559 * fail, if only because the allocation requires two 12560 * buffers. (We set the size to the highest 12561 * multiple of 128m because it ensures that the size 12562 * will remain a multiple of a megabyte when 12563 * repeatedly halved -- all the way down to 15m.) 12564 */ 12565 val = LONG_MAX - (1 << 27) + 1; 12566 } 12567 } 12568 12569 state->dts_options[option] = val; 12570 12571 return (0); 12572 } 12573 12574 static void 12575 dtrace_state_destroy(dtrace_state_t *state) 12576 { 12577 dtrace_ecb_t *ecb; 12578 dtrace_vstate_t *vstate = &state->dts_vstate; 12579 minor_t minor = getminor(state->dts_dev); 12580 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12581 dtrace_speculation_t *spec = state->dts_speculations; 12582 int nspec = state->dts_nspeculations; 12583 uint32_t match; 12584 12585 ASSERT(MUTEX_HELD(&dtrace_lock)); 12586 ASSERT(MUTEX_HELD(&cpu_lock)); 12587 12588 /* 12589 * First, retract any retained enablings for this state. 12590 */ 12591 dtrace_enabling_retract(state); 12592 ASSERT(state->dts_nretained == 0); 12593 12594 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 12595 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 12596 /* 12597 * We have managed to come into dtrace_state_destroy() on a 12598 * hot enabling -- almost certainly because of a disorderly 12599 * shutdown of a consumer. (That is, a consumer that is 12600 * exiting without having called dtrace_stop().) In this case, 12601 * we're going to set our activity to be KILLED, and then 12602 * issue a sync to be sure that everyone is out of probe 12603 * context before we start blowing away ECBs. 12604 */ 12605 state->dts_activity = DTRACE_ACTIVITY_KILLED; 12606 dtrace_sync(); 12607 } 12608 12609 /* 12610 * Release the credential hold we took in dtrace_state_create.
12611 */ 12612 if (state->dts_cred.dcr_cred != NULL) 12613 crfree(state->dts_cred.dcr_cred); 12614 12615 /* 12616 * Now we can safely disable and destroy any enabled probes. Because 12617 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 12618 * (especially if they're all enabled), we take two passes through the 12619 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 12620 * in the second we disable whatever is left over. 12621 */ 12622 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 12623 for (i = 0; i < state->dts_necbs; i++) { 12624 if ((ecb = state->dts_ecbs[i]) == NULL) 12625 continue; 12626 12627 if (match && ecb->dte_probe != NULL) { 12628 dtrace_probe_t *probe = ecb->dte_probe; 12629 dtrace_provider_t *prov = probe->dtpr_provider; 12630 12631 if (!(prov->dtpv_priv.dtpp_flags & match)) 12632 continue; 12633 } 12634 12635 dtrace_ecb_disable(ecb); 12636 dtrace_ecb_destroy(ecb); 12637 } 12638 12639 if (!match) 12640 break; 12641 } 12642 12643 /* 12644 * Before we free the buffers, perform one more sync to assure that 12645 * every CPU is out of probe context. 12646 */ 12647 dtrace_sync(); 12648 12649 dtrace_buffer_free(state->dts_buffer); 12650 dtrace_buffer_free(state->dts_aggbuffer); 12651 12652 for (i = 0; i < nspec; i++) 12653 dtrace_buffer_free(spec[i].dtsp_buffer); 12654 12655 if (state->dts_cleaner != CYCLIC_NONE) 12656 cyclic_remove(state->dts_cleaner); 12657 12658 if (state->dts_deadman != CYCLIC_NONE) 12659 cyclic_remove(state->dts_deadman); 12660 12661 dtrace_dstate_fini(&vstate->dtvs_dynvars); 12662 dtrace_vstate_fini(vstate); 12663 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 12664 12665 if (state->dts_aggregations != NULL) { 12666 #ifdef DEBUG 12667 for (i = 0; i < state->dts_naggregations; i++) 12668 ASSERT(state->dts_aggregations[i] == NULL); 12669 #endif 12670 ASSERT(state->dts_naggregations > 0); 12671 kmem_free(state->dts_aggregations, 12672 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 12673 } 12674 12675 kmem_free(state->dts_buffer, bufsize); 12676 kmem_free(state->dts_aggbuffer, bufsize); 12677 12678 for (i = 0; i < nspec; i++) 12679 kmem_free(spec[i].dtsp_buffer, bufsize); 12680 12681 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12682 12683 dtrace_format_destroy(state); 12684 12685 vmem_destroy(state->dts_aggid_arena); 12686 ddi_soft_state_free(dtrace_softstate, minor); 12687 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12688 } 12689 12690 /* 12691 * DTrace Anonymous Enabling Functions 12692 */ 12693 static dtrace_state_t * 12694 dtrace_anon_grab(void) 12695 { 12696 dtrace_state_t *state; 12697 12698 ASSERT(MUTEX_HELD(&dtrace_lock)); 12699 12700 if ((state = dtrace_anon.dta_state) == NULL) { 12701 ASSERT(dtrace_anon.dta_enabling == NULL); 12702 return (NULL); 12703 } 12704 12705 ASSERT(dtrace_anon.dta_enabling != NULL); 12706 ASSERT(dtrace_retained != NULL); 12707 12708 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 12709 dtrace_anon.dta_enabling = NULL; 12710 dtrace_anon.dta_state = NULL; 12711 12712 return (state); 12713 } 12714 12715 static void 12716 dtrace_anon_property(void) 12717 { 12718 int i, rv; 12719 dtrace_state_t *state; 12720 dof_hdr_t *dof; 12721 char c[32]; /* enough for "dof-data-" + digits */ 12722 12723 ASSERT(MUTEX_HELD(&dtrace_lock)); 12724 ASSERT(MUTEX_HELD(&cpu_lock)); 12725 12726 for (i = 0; ; i++) { 12727 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 12728 12729 dtrace_err_verbose = 1; 12730 12731 if ((dof = dtrace_dof_property(c)) 
== NULL) { 12732 dtrace_err_verbose = 0; 12733 break; 12734 } 12735 12736 /* 12737 * We want to create anonymous state, so we need to transition 12738 * the kernel debugger to indicate that DTrace is active. If 12739 * this fails (e.g. because the debugger has modified text in 12740 * some way), we won't continue with the processing. 12741 */ 12742 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 12743 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 12744 "enabling ignored."); 12745 dtrace_dof_destroy(dof); 12746 break; 12747 } 12748 12749 /* 12750 * If we haven't allocated an anonymous state, we'll do so now. 12751 */ 12752 if ((state = dtrace_anon.dta_state) == NULL) { 12753 state = dtrace_state_create(NULL, NULL); 12754 dtrace_anon.dta_state = state; 12755 12756 if (state == NULL) { 12757 /* 12758 * This basically shouldn't happen: the only 12759 * failure mode from dtrace_state_create() is a 12760 * failure of ddi_soft_state_zalloc() that 12761 * itself should never happen. Still, the 12762 * interface allows for a failure mode, and 12763 * we want to fail as gracefully as possible: 12764 * we'll emit an error message and cease 12765 * processing anonymous state in this case. 12766 */ 12767 cmn_err(CE_WARN, "failed to create " 12768 "anonymous state"); 12769 dtrace_dof_destroy(dof); 12770 break; 12771 } 12772 } 12773 12774 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 12775 &dtrace_anon.dta_enabling, 0, B_TRUE); 12776 12777 if (rv == 0) 12778 rv = dtrace_dof_options(dof, state); 12779 12780 dtrace_err_verbose = 0; 12781 dtrace_dof_destroy(dof); 12782 12783 if (rv != 0) { 12784 /* 12785 * This is malformed DOF; chuck any anonymous state 12786 * that we created. 12787 */ 12788 ASSERT(dtrace_anon.dta_enabling == NULL); 12789 dtrace_state_destroy(state); 12790 dtrace_anon.dta_state = NULL; 12791 break; 12792 } 12793 12794 ASSERT(dtrace_anon.dta_enabling != NULL); 12795 } 12796 12797 if (dtrace_anon.dta_enabling != NULL) { 12798 int rval; 12799 12800 /* 12801 * dtrace_enabling_retain() can only fail because we are 12802 * trying to retain more enablings than are allowed -- but 12803 * we only have one anonymous enabling, and we are guaranteed 12804 * to be allowed at least one retained enabling; we assert 12805 * that dtrace_enabling_retain() returns success. 12806 */ 12807 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 12808 ASSERT(rval == 0); 12809 12810 dtrace_enabling_dump(dtrace_anon.dta_enabling); 12811 } 12812 } 12813 12814 /* 12815 * DTrace Helper Functions 12816 */ 12817 static void 12818 dtrace_helper_trace(dtrace_helper_action_t *helper, 12819 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 12820 { 12821 uint32_t size, next, nnext, i; 12822 dtrace_helptrace_t *ent; 12823 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12824 12825 if (!dtrace_helptrace_enabled) 12826 return; 12827 12828 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 12829 12830 /* 12831 * What would a tracing framework be without its own tracing 12832 * framework? (Well, a hell of a lot simpler, for starters...) 12833 */ 12834 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 12835 sizeof (uint64_t) - sizeof (uint64_t); 12836 12837 /* 12838 * Iterate until we can allocate a slot in the trace buffer. 
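 *
 * For example (hypothetical numbers): with a 4096-byte trace buffer
 * and a 128-byte entry, a dtrace_helptrace_next of 4032 would wrap --
 * nnext becomes 128 and the entry is written at offset 0; otherwise
 * the entry is written at the old value of dtrace_helptrace_next.  If
 * another CPU claims the slot first, the dtrace_cas32() below fails
 * and we simply retry.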
12839 */ 12840 do { 12841 next = dtrace_helptrace_next; 12842 12843 if (next + size < dtrace_helptrace_bufsize) { 12844 nnext = next + size; 12845 } else { 12846 nnext = size; 12847 } 12848 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 12849 12850 /* 12851 * We have our slot; fill it in. 12852 */ 12853 if (nnext == size) 12854 next = 0; 12855 12856 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 12857 ent->dtht_helper = helper; 12858 ent->dtht_where = where; 12859 ent->dtht_nlocals = vstate->dtvs_nlocals; 12860 12861 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 12862 mstate->dtms_fltoffs : -1; 12863 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 12864 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 12865 12866 for (i = 0; i < vstate->dtvs_nlocals; i++) { 12867 dtrace_statvar_t *svar; 12868 12869 if ((svar = vstate->dtvs_locals[i]) == NULL) 12870 continue; 12871 12872 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 12873 ent->dtht_locals[i] = 12874 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 12875 } 12876 } 12877 12878 static uint64_t 12879 dtrace_helper(int which, dtrace_mstate_t *mstate, 12880 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 12881 { 12882 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12883 uint64_t sarg0 = mstate->dtms_arg[0]; 12884 uint64_t sarg1 = mstate->dtms_arg[1]; 12885 uint64_t rval; 12886 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 12887 dtrace_helper_action_t *helper; 12888 dtrace_vstate_t *vstate; 12889 dtrace_difo_t *pred; 12890 int i, trace = dtrace_helptrace_enabled; 12891 12892 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 12893 12894 if (helpers == NULL) 12895 return (0); 12896 12897 if ((helper = helpers->dthps_actions[which]) == NULL) 12898 return (0); 12899 12900 vstate = &helpers->dthps_vstate; 12901 mstate->dtms_arg[0] = arg0; 12902 mstate->dtms_arg[1] = arg1; 12903 12904 /* 12905 * Now iterate over each helper. If its predicate evaluates to 'true', 12906 * we'll call the corresponding actions. Note that the below calls 12907 * to dtrace_dif_emulate() may set faults in machine state. This is 12908 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 12909 * the stored DIF offset with its own (which is the desired behavior). 12910 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 12911 * from machine state; this is okay, too. 12912 */ 12913 for (; helper != NULL; helper = helper->dtha_next) { 12914 if ((pred = helper->dtha_predicate) != NULL) { 12915 if (trace) 12916 dtrace_helper_trace(helper, mstate, vstate, 0); 12917 12918 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 12919 goto next; 12920 12921 if (*flags & CPU_DTRACE_FAULT) 12922 goto err; 12923 } 12924 12925 for (i = 0; i < helper->dtha_nactions; i++) { 12926 if (trace) 12927 dtrace_helper_trace(helper, 12928 mstate, vstate, i + 1); 12929 12930 rval = dtrace_dif_emulate(helper->dtha_actions[i], 12931 mstate, vstate, state); 12932 12933 if (*flags & CPU_DTRACE_FAULT) 12934 goto err; 12935 } 12936 12937 next: 12938 if (trace) 12939 dtrace_helper_trace(helper, mstate, vstate, 12940 DTRACE_HELPTRACE_NEXT); 12941 } 12942 12943 if (trace) 12944 dtrace_helper_trace(helper, mstate, vstate, 12945 DTRACE_HELPTRACE_DONE); 12946 12947 /* 12948 * Restore the arg0 that we saved upon entry. 
12949 */ 12950 mstate->dtms_arg[0] = sarg0; 12951 mstate->dtms_arg[1] = sarg1; 12952 12953 return (rval); 12954 12955 err: 12956 if (trace) 12957 dtrace_helper_trace(helper, mstate, vstate, 12958 DTRACE_HELPTRACE_ERR); 12959 12960 /* 12961 * Restore the arg0 that we saved upon entry. 12962 */ 12963 mstate->dtms_arg[0] = sarg0; 12964 mstate->dtms_arg[1] = sarg1; 12965 12966 return (0); 12967 } 12968 12969 static void 12970 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 12971 dtrace_vstate_t *vstate) 12972 { 12973 int i; 12974 12975 if (helper->dtha_predicate != NULL) 12976 dtrace_difo_release(helper->dtha_predicate, vstate); 12977 12978 for (i = 0; i < helper->dtha_nactions; i++) { 12979 ASSERT(helper->dtha_actions[i] != NULL); 12980 dtrace_difo_release(helper->dtha_actions[i], vstate); 12981 } 12982 12983 kmem_free(helper->dtha_actions, 12984 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 12985 kmem_free(helper, sizeof (dtrace_helper_action_t)); 12986 } 12987 12988 static int 12989 dtrace_helper_destroygen(int gen) 12990 { 12991 proc_t *p = curproc; 12992 dtrace_helpers_t *help = p->p_dtrace_helpers; 12993 dtrace_vstate_t *vstate; 12994 int i; 12995 12996 ASSERT(MUTEX_HELD(&dtrace_lock)); 12997 12998 if (help == NULL || gen > help->dthps_generation) 12999 return (EINVAL); 13000 13001 vstate = &help->dthps_vstate; 13002 13003 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13004 dtrace_helper_action_t *last = NULL, *h, *next; 13005 13006 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13007 next = h->dtha_next; 13008 13009 if (h->dtha_generation == gen) { 13010 if (last != NULL) { 13011 last->dtha_next = next; 13012 } else { 13013 help->dthps_actions[i] = next; 13014 } 13015 13016 dtrace_helper_action_destroy(h, vstate); 13017 } else { 13018 last = h; 13019 } 13020 } 13021 } 13022 13023 /* 13024 * Iterate until we've cleared out all helper providers with the 13025 * given generation number. 13026 */ 13027 for (;;) { 13028 dtrace_helper_provider_t *prov; 13029 13030 /* 13031 * Look for a helper provider with the right generation. We 13032 * have to start back at the beginning of the list each time 13033 * because we drop dtrace_lock. It's unlikely that we'll make 13034 * more than two passes. 13035 */ 13036 for (i = 0; i < help->dthps_nprovs; i++) { 13037 prov = help->dthps_provs[i]; 13038 13039 if (prov->dthp_generation == gen) 13040 break; 13041 } 13042 13043 /* 13044 * If there were no matches, we're done. 13045 */ 13046 if (i == help->dthps_nprovs) 13047 break; 13048 13049 /* 13050 * Move the last helper provider into this slot. 13051 */ 13052 help->dthps_nprovs--; 13053 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 13054 help->dthps_provs[help->dthps_nprovs] = NULL; 13055 13056 mutex_exit(&dtrace_lock); 13057 13058 /* 13059 * If we have a meta provider, remove this helper provider.
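 *
 * (dtrace_lock was dropped just above, which is why the enclosing
 * loop restarts its scan of dthps_provs from the beginning on each
 * iteration; dtrace_lock is reacquired once the provider has been
 * destroyed.)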
13060 */ 13061 mutex_enter(&dtrace_meta_lock); 13062 if (dtrace_meta_pid != NULL) { 13063 ASSERT(dtrace_deferred_pid == NULL); 13064 dtrace_helper_provider_remove(&prov->dthp_prov, 13065 p->p_pid); 13066 } 13067 mutex_exit(&dtrace_meta_lock); 13068 13069 dtrace_helper_provider_destroy(prov); 13070 13071 mutex_enter(&dtrace_lock); 13072 } 13073 13074 return (0); 13075 } 13076 13077 static int 13078 dtrace_helper_validate(dtrace_helper_action_t *helper) 13079 { 13080 int err = 0, i; 13081 dtrace_difo_t *dp; 13082 13083 if ((dp = helper->dtha_predicate) != NULL) 13084 err += dtrace_difo_validate_helper(dp); 13085 13086 for (i = 0; i < helper->dtha_nactions; i++) 13087 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 13088 13089 return (err == 0); 13090 } 13091 13092 static int 13093 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 13094 { 13095 dtrace_helpers_t *help; 13096 dtrace_helper_action_t *helper, *last; 13097 dtrace_actdesc_t *act; 13098 dtrace_vstate_t *vstate; 13099 dtrace_predicate_t *pred; 13100 int count = 0, nactions = 0, i; 13101 13102 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 13103 return (EINVAL); 13104 13105 help = curproc->p_dtrace_helpers; 13106 last = help->dthps_actions[which]; 13107 vstate = &help->dthps_vstate; 13108 13109 for (count = 0; last != NULL; last = last->dtha_next) { 13110 count++; 13111 if (last->dtha_next == NULL) 13112 break; 13113 } 13114 13115 /* 13116 * If we already have dtrace_helper_actions_max helper actions for this 13117 * helper action type, we'll refuse to add a new one. 13118 */ 13119 if (count >= dtrace_helper_actions_max) 13120 return (ENOSPC); 13121 13122 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 13123 helper->dtha_generation = help->dthps_generation; 13124 13125 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 13126 ASSERT(pred->dtp_difo != NULL); 13127 dtrace_difo_hold(pred->dtp_difo); 13128 helper->dtha_predicate = pred->dtp_difo; 13129 } 13130 13131 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 13132 if (act->dtad_kind != DTRACEACT_DIFEXPR) 13133 goto err; 13134 13135 if (act->dtad_difo == NULL) 13136 goto err; 13137 13138 nactions++; 13139 } 13140 13141 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 13142 (helper->dtha_nactions = nactions), KM_SLEEP); 13143 13144 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 13145 dtrace_difo_hold(act->dtad_difo); 13146 helper->dtha_actions[i++] = act->dtad_difo; 13147 } 13148 13149 if (!dtrace_helper_validate(helper)) 13150 goto err; 13151 13152 if (last == NULL) { 13153 help->dthps_actions[which] = helper; 13154 } else { 13155 last->dtha_next = helper; 13156 } 13157 13158 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 13159 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 13160 dtrace_helptrace_next = 0; 13161 } 13162 13163 return (0); 13164 err: 13165 dtrace_helper_action_destroy(helper, vstate); 13166 return (EINVAL); 13167 } 13168 13169 static void 13170 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 13171 dof_helper_t *dofhp) 13172 { 13173 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 13174 13175 mutex_enter(&dtrace_meta_lock); 13176 mutex_enter(&dtrace_lock); 13177 13178 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 13179 /* 13180 * If the dtrace module is loaded but not attached, or if 13181 * there isn't a meta provider registered to deal with 13182 * these provider descriptions, we need to postpone creating 13183 * the actual providers until
later. 13184 */ 13185 13186 if (help->dthps_next == NULL && help->dthps_prev == NULL && 13187 dtrace_deferred_pid != help) { 13188 help->dthps_deferred = 1; 13189 help->dthps_pid = p->p_pid; 13190 help->dthps_next = dtrace_deferred_pid; 13191 help->dthps_prev = NULL; 13192 if (dtrace_deferred_pid != NULL) 13193 dtrace_deferred_pid->dthps_prev = help; 13194 dtrace_deferred_pid = help; 13195 } 13196 13197 mutex_exit(&dtrace_lock); 13198 13199 } else if (dofhp != NULL) { 13200 /* 13201 * If the dtrace module is loaded and we have a particular 13202 * helper provider description, pass that off to the 13203 * meta provider. 13204 */ 13205 13206 mutex_exit(&dtrace_lock); 13207 13208 dtrace_helper_provide(dofhp, p->p_pid); 13209 13210 } else { 13211 /* 13212 * Otherwise, just pass all the helper provider descriptions 13213 * off to the meta provider. 13214 */ 13215 13216 int i; 13217 mutex_exit(&dtrace_lock); 13218 13219 for (i = 0; i < help->dthps_nprovs; i++) { 13220 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 13221 p->p_pid); 13222 } 13223 } 13224 13225 mutex_exit(&dtrace_meta_lock); 13226 } 13227 13228 static int 13229 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 13230 { 13231 dtrace_helpers_t *help; 13232 dtrace_helper_provider_t *hprov, **tmp_provs; 13233 uint_t tmp_maxprovs, i; 13234 13235 ASSERT(MUTEX_HELD(&dtrace_lock)); 13236 13237 help = curproc->p_dtrace_helpers; 13238 ASSERT(help != NULL); 13239 13240 /* 13241 * If we already have dtrace_helper_providers_max helper providers, 13242 * we refuse to add a new one. 13243 */ 13244 if (help->dthps_nprovs >= dtrace_helper_providers_max) 13245 return (ENOSPC); 13246 13247 /* 13248 * Check to make sure this isn't a duplicate. 13249 */ 13250 for (i = 0; i < help->dthps_nprovs; i++) { 13251 if (dofhp->dofhp_addr == 13252 help->dthps_provs[i]->dthp_prov.dofhp_addr) 13253 return (EALREADY); 13254 } 13255 13256 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 13257 hprov->dthp_prov = *dofhp; 13258 hprov->dthp_ref = 1; 13259 hprov->dthp_generation = gen; 13260 13261 /* 13262 * Allocate a bigger table for helper providers if it's already full.
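 *
 * The table grows by doubling -- 0 -> 2 -> 4 -> 8 -> ... -- capped at
 * dtrace_helper_providers_max.  For example, adding a third provider
 * to a full two-entry table allocates a four-entry table, copies the
 * two existing pointers over with bcopy(), and frees the old table.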
13263 */ 13264 if (help->dthps_maxprovs == help->dthps_nprovs) { 13265 tmp_maxprovs = help->dthps_maxprovs; 13266 tmp_provs = help->dthps_provs; 13267 13268 if (help->dthps_maxprovs == 0) 13269 help->dthps_maxprovs = 2; 13270 else 13271 help->dthps_maxprovs *= 2; 13272 if (help->dthps_maxprovs > dtrace_helper_providers_max) 13273 help->dthps_maxprovs = dtrace_helper_providers_max; 13274 13275 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 13276 13277 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 13278 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13279 13280 if (tmp_provs != NULL) { 13281 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 13282 sizeof (dtrace_helper_provider_t *)); 13283 kmem_free(tmp_provs, tmp_maxprovs * 13284 sizeof (dtrace_helper_provider_t *)); 13285 } 13286 } 13287 13288 help->dthps_provs[help->dthps_nprovs] = hprov; 13289 help->dthps_nprovs++; 13290 13291 return (0); 13292 } 13293 13294 static void 13295 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 13296 { 13297 mutex_enter(&dtrace_lock); 13298 13299 if (--hprov->dthp_ref == 0) { 13300 dof_hdr_t *dof; 13301 mutex_exit(&dtrace_lock); 13302 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 13303 dtrace_dof_destroy(dof); 13304 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 13305 } else { 13306 mutex_exit(&dtrace_lock); 13307 } 13308 } 13309 13310 static int 13311 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 13312 { 13313 uintptr_t daddr = (uintptr_t)dof; 13314 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 13315 dof_provider_t *provider; 13316 dof_probe_t *probe; 13317 uint8_t *arg; 13318 char *strtab, *typestr; 13319 dof_stridx_t typeidx; 13320 size_t typesz; 13321 uint_t nprobes, j, k; 13322 13323 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 13324 13325 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 13326 dtrace_dof_error(dof, "misaligned section offset"); 13327 return (-1); 13328 } 13329 13330 /* 13331 * The section needs to be large enough to contain the DOF provider 13332 * structure appropriate for the given version. 13333 */ 13334 if (sec->dofs_size < 13335 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
13336 offsetof(dof_provider_t, dofpv_prenoffs) : 13337 sizeof (dof_provider_t))) { 13338 dtrace_dof_error(dof, "provider section too small"); 13339 return (-1); 13340 } 13341 13342 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 13343 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 13344 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 13345 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 13346 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 13347 13348 if (str_sec == NULL || prb_sec == NULL || 13349 arg_sec == NULL || off_sec == NULL) 13350 return (-1); 13351 13352 enoff_sec = NULL; 13353 13354 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13355 provider->dofpv_prenoffs != DOF_SECT_NONE && 13356 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 13357 provider->dofpv_prenoffs)) == NULL) 13358 return (-1); 13359 13360 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 13361 13362 if (provider->dofpv_name >= str_sec->dofs_size || 13363 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 13364 dtrace_dof_error(dof, "invalid provider name"); 13365 return (-1); 13366 } 13367 13368 if (prb_sec->dofs_entsize == 0 || 13369 prb_sec->dofs_entsize > prb_sec->dofs_size) { 13370 dtrace_dof_error(dof, "invalid entry size"); 13371 return (-1); 13372 } 13373 13374 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 13375 dtrace_dof_error(dof, "misaligned entry size"); 13376 return (-1); 13377 } 13378 13379 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 13380 dtrace_dof_error(dof, "invalid entry size"); 13381 return (-1); 13382 } 13383 13384 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 13385 dtrace_dof_error(dof, "misaligned section offset"); 13386 return (-1); 13387 } 13388 13389 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 13390 dtrace_dof_error(dof, "invalid entry size"); 13391 return (-1); 13392 } 13393 13394 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 13395 13396 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 13397 13398 /* 13399 * Take a pass through the probes to check for errors. 13400 */ 13401 for (j = 0; j < nprobes; j++) { 13402 probe = (dof_probe_t *)(uintptr_t)(daddr + 13403 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 13404 13405 if (probe->dofpr_func >= str_sec->dofs_size) { 13406 dtrace_dof_error(dof, "invalid function name"); 13407 return (-1); 13408 } 13409 13410 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 13411 dtrace_dof_error(dof, "function name too long"); 13412 return (-1); 13413 } 13414 13415 if (probe->dofpr_name >= str_sec->dofs_size || 13416 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 13417 dtrace_dof_error(dof, "invalid probe name"); 13418 return (-1); 13419 } 13420 13421 /* 13422 * The offset count must not wrap the index, and the offsets 13423 * must also not overflow the section's data. 13424 */ 13425 if (probe->dofpr_offidx + probe->dofpr_noffs < 13426 probe->dofpr_offidx || 13427 (probe->dofpr_offidx + probe->dofpr_noffs) * 13428 off_sec->dofs_entsize > off_sec->dofs_size) { 13429 dtrace_dof_error(dof, "invalid probe offset"); 13430 return (-1); 13431 } 13432 13433 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 13434 /* 13435 * If there's no is-enabled offset section, make sure 13436 * there aren't any is-enabled offsets. Otherwise 13437 * perform the same checks as for probe offsets 13438 * (immediately above). 
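 *
 * (As with the probe-offset check above, the first comparison guards
 * against the index arithmetic wrapping around: if adding the count to
 * the starting index yields a smaller value, the DOF is malformed and
 * is rejected rather than being allowed to reference offsets outside
 * the section.)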
13439 */ 13440 if (enoff_sec == NULL) { 13441 if (probe->dofpr_enoffidx != 0 || 13442 probe->dofpr_nenoffs != 0) { 13443 dtrace_dof_error(dof, "is-enabled " 13444 "offsets with null section"); 13445 return (-1); 13446 } 13447 } else if (probe->dofpr_enoffidx + 13448 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 13449 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 13450 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 13451 dtrace_dof_error(dof, "invalid is-enabled " 13452 "offset"); 13453 return (-1); 13454 } 13455 13456 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 13457 dtrace_dof_error(dof, "zero probe and " 13458 "is-enabled offsets"); 13459 return (-1); 13460 } 13461 } else if (probe->dofpr_noffs == 0) { 13462 dtrace_dof_error(dof, "zero probe offsets"); 13463 return (-1); 13464 } 13465 13466 if (probe->dofpr_argidx + probe->dofpr_xargc < 13467 probe->dofpr_argidx || 13468 (probe->dofpr_argidx + probe->dofpr_xargc) * 13469 arg_sec->dofs_entsize > arg_sec->dofs_size) { 13470 dtrace_dof_error(dof, "invalid args"); 13471 return (-1); 13472 } 13473 13474 typeidx = probe->dofpr_nargv; 13475 typestr = strtab + probe->dofpr_nargv; 13476 for (k = 0; k < probe->dofpr_nargc; k++) { 13477 if (typeidx >= str_sec->dofs_size) { 13478 dtrace_dof_error(dof, "bad " 13479 "native argument type"); 13480 return (-1); 13481 } 13482 13483 typesz = strlen(typestr) + 1; 13484 if (typesz > DTRACE_ARGTYPELEN) { 13485 dtrace_dof_error(dof, "native " 13486 "argument type too long"); 13487 return (-1); 13488 } 13489 typeidx += typesz; 13490 typestr += typesz; 13491 } 13492 13493 typeidx = probe->dofpr_xargv; 13494 typestr = strtab + probe->dofpr_xargv; 13495 for (k = 0; k < probe->dofpr_xargc; k++) { 13496 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 13497 dtrace_dof_error(dof, "bad " 13498 "native argument index"); 13499 return (-1); 13500 } 13501 13502 if (typeidx >= str_sec->dofs_size) { 13503 dtrace_dof_error(dof, "bad " 13504 "translated argument type"); 13505 return (-1); 13506 } 13507 13508 typesz = strlen(typestr) + 1; 13509 if (typesz > DTRACE_ARGTYPELEN) { 13510 dtrace_dof_error(dof, "translated argument " 13511 "type too long"); 13512 return (-1); 13513 } 13514 13515 typeidx += typesz; 13516 typestr += typesz; 13517 } 13518 } 13519 13520 return (0); 13521 } 13522 13523 static int 13524 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 13525 { 13526 dtrace_helpers_t *help; 13527 dtrace_vstate_t *vstate; 13528 dtrace_enabling_t *enab = NULL; 13529 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 13530 uintptr_t daddr = (uintptr_t)dof; 13531 13532 ASSERT(MUTEX_HELD(&dtrace_lock)); 13533 13534 if ((help = curproc->p_dtrace_helpers) == NULL) 13535 help = dtrace_helpers_create(curproc); 13536 13537 vstate = &help->dthps_vstate; 13538 13539 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 13540 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 13541 dtrace_dof_destroy(dof); 13542 return (rv); 13543 } 13544 13545 /* 13546 * Look for helper providers and validate their descriptions. 
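 *
 * Each DOF_SECT_PROVIDER section found below is passed to
 * dtrace_helper_provider_validate(); a single malformed provider
 * section causes the entire helper DOF to be rejected.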
13547 */ 13548 if (dhp != NULL) { 13549 for (i = 0; i < dof->dofh_secnum; i++) { 13550 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 13551 dof->dofh_secoff + i * dof->dofh_secsize); 13552 13553 if (sec->dofs_type != DOF_SECT_PROVIDER) 13554 continue; 13555 13556 if (dtrace_helper_provider_validate(dof, sec) != 0) { 13557 dtrace_enabling_destroy(enab); 13558 dtrace_dof_destroy(dof); 13559 return (-1); 13560 } 13561 13562 nprovs++; 13563 } 13564 } 13565 13566 /* 13567 * Now we need to walk through the ECB descriptions in the enabling. 13568 */ 13569 for (i = 0; i < enab->dten_ndesc; i++) { 13570 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 13571 dtrace_probedesc_t *desc = &ep->dted_probe; 13572 13573 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 13574 continue; 13575 13576 if (strcmp(desc->dtpd_mod, "helper") != 0) 13577 continue; 13578 13579 if (strcmp(desc->dtpd_func, "ustack") != 0) 13580 continue; 13581 13582 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 13583 ep)) != 0) { 13584 /* 13585 * Adding this helper action failed -- we are now going 13586 * to rip out the entire generation and return failure. 13587 */ 13588 (void) dtrace_helper_destroygen(help->dthps_generation); 13589 dtrace_enabling_destroy(enab); 13590 dtrace_dof_destroy(dof); 13591 return (-1); 13592 } 13593 13594 nhelpers++; 13595 } 13596 13597 if (nhelpers < enab->dten_ndesc) 13598 dtrace_dof_error(dof, "unmatched helpers"); 13599 13600 gen = help->dthps_generation++; 13601 dtrace_enabling_destroy(enab); 13602 13603 if (dhp != NULL && nprovs > 0) { 13604 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 13605 if (dtrace_helper_provider_add(dhp, gen) == 0) { 13606 mutex_exit(&dtrace_lock); 13607 dtrace_helper_provider_register(curproc, help, dhp); 13608 mutex_enter(&dtrace_lock); 13609 13610 destroy = 0; 13611 } 13612 } 13613 13614 if (destroy) 13615 dtrace_dof_destroy(dof); 13616 13617 return (gen); 13618 } 13619 13620 static dtrace_helpers_t * 13621 dtrace_helpers_create(proc_t *p) 13622 { 13623 dtrace_helpers_t *help; 13624 13625 ASSERT(MUTEX_HELD(&dtrace_lock)); 13626 ASSERT(p->p_dtrace_helpers == NULL); 13627 13628 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 13629 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 13630 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 13631 13632 p->p_dtrace_helpers = help; 13633 dtrace_helpers++; 13634 13635 return (help); 13636 } 13637 13638 static void 13639 dtrace_helpers_destroy(void) 13640 { 13641 dtrace_helpers_t *help; 13642 dtrace_vstate_t *vstate; 13643 proc_t *p = curproc; 13644 int i; 13645 13646 mutex_enter(&dtrace_lock); 13647 13648 ASSERT(p->p_dtrace_helpers != NULL); 13649 ASSERT(dtrace_helpers > 0); 13650 13651 help = p->p_dtrace_helpers; 13652 vstate = &help->dthps_vstate; 13653 13654 /* 13655 * We're now going to lose the help from this process. 13656 */ 13657 p->p_dtrace_helpers = NULL; 13658 dtrace_sync(); 13659 13660 /* 13661 * Destroy the helper actions. 13662 */ 13663 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13664 dtrace_helper_action_t *h, *next; 13665 13666 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13667 next = h->dtha_next; 13668 dtrace_helper_action_destroy(h, vstate); 13669 h = next; 13670 } 13671 } 13672 13673 mutex_exit(&dtrace_lock); 13674 13675 /* 13676 * Destroy the helper providers.
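 *
 * If a meta provider is registered, each helper provider is removed
 * from it; otherwise this helpers structure is unlinked from the
 * deferred list so that a later-arriving meta provider is not handed
 * providers for a process whose helpers have been torn down.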
13677 */ 13678 if (help->dthps_maxprovs > 0) { 13679 mutex_enter(&dtrace_meta_lock); 13680 if (dtrace_meta_pid != NULL) { 13681 ASSERT(dtrace_deferred_pid == NULL); 13682 13683 for (i = 0; i < help->dthps_nprovs; i++) { 13684 dtrace_helper_provider_remove( 13685 &help->dthps_provs[i]->dthp_prov, p->p_pid); 13686 } 13687 } else { 13688 mutex_enter(&dtrace_lock); 13689 ASSERT(help->dthps_deferred == 0 || 13690 help->dthps_next != NULL || 13691 help->dthps_prev != NULL || 13692 help == dtrace_deferred_pid); 13693 13694 /* 13695 * Remove the helper from the deferred list. 13696 */ 13697 if (help->dthps_next != NULL) 13698 help->dthps_next->dthps_prev = help->dthps_prev; 13699 if (help->dthps_prev != NULL) 13700 help->dthps_prev->dthps_next = help->dthps_next; 13701 if (dtrace_deferred_pid == help) { 13702 dtrace_deferred_pid = help->dthps_next; 13703 ASSERT(help->dthps_prev == NULL); 13704 } 13705 13706 mutex_exit(&dtrace_lock); 13707 } 13708 13709 mutex_exit(&dtrace_meta_lock); 13710 13711 for (i = 0; i < help->dthps_nprovs; i++) { 13712 dtrace_helper_provider_destroy(help->dthps_provs[i]); 13713 } 13714 13715 kmem_free(help->dthps_provs, help->dthps_maxprovs * 13716 sizeof (dtrace_helper_provider_t *)); 13717 } 13718 13719 mutex_enter(&dtrace_lock); 13720 13721 dtrace_vstate_fini(&help->dthps_vstate); 13722 kmem_free(help->dthps_actions, 13723 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 13724 kmem_free(help, sizeof (dtrace_helpers_t)); 13725 13726 --dtrace_helpers; 13727 mutex_exit(&dtrace_lock); 13728 } 13729 13730 static void 13731 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 13732 { 13733 dtrace_helpers_t *help, *newhelp; 13734 dtrace_helper_action_t *helper, *new, *last; 13735 dtrace_difo_t *dp; 13736 dtrace_vstate_t *vstate; 13737 int i, j, sz, hasprovs = 0; 13738 13739 mutex_enter(&dtrace_lock); 13740 ASSERT(from->p_dtrace_helpers != NULL); 13741 ASSERT(dtrace_helpers > 0); 13742 13743 help = from->p_dtrace_helpers; 13744 newhelp = dtrace_helpers_create(to); 13745 ASSERT(to->p_dtrace_helpers != NULL); 13746 13747 newhelp->dthps_generation = help->dthps_generation; 13748 vstate = &newhelp->dthps_vstate; 13749 13750 /* 13751 * Duplicate the helper actions. 13752 */ 13753 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13754 if ((helper = help->dthps_actions[i]) == NULL) 13755 continue; 13756 13757 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 13758 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 13759 KM_SLEEP); 13760 new->dtha_generation = helper->dtha_generation; 13761 13762 if ((dp = helper->dtha_predicate) != NULL) { 13763 dp = dtrace_difo_duplicate(dp, vstate); 13764 new->dtha_predicate = dp; 13765 } 13766 13767 new->dtha_nactions = helper->dtha_nactions; 13768 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 13769 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 13770 13771 for (j = 0; j < new->dtha_nactions; j++) { 13772 dtrace_difo_t *dp = helper->dtha_actions[j]; 13773 13774 ASSERT(dp != NULL); 13775 dp = dtrace_difo_duplicate(dp, vstate); 13776 new->dtha_actions[j] = dp; 13777 } 13778 13779 if (last != NULL) { 13780 last->dtha_next = new; 13781 } else { 13782 newhelp->dthps_actions[i] = new; 13783 } 13784 13785 last = new; 13786 } 13787 } 13788 13789 /* 13790 * Duplicate the helper providers and register them with the 13791 * DTrace framework. 
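 *
 * Note that the providers themselves are not deep-copied: the child
 * simply takes an additional reference (dthp_ref) on each of its
 * parent's helper providers.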
13792 */ 13793 if (help->dthps_nprovs > 0) { 13794 newhelp->dthps_nprovs = help->dthps_nprovs; 13795 newhelp->dthps_maxprovs = help->dthps_nprovs; 13796 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 13797 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13798 for (i = 0; i < newhelp->dthps_nprovs; i++) { 13799 newhelp->dthps_provs[i] = help->dthps_provs[i]; 13800 newhelp->dthps_provs[i]->dthp_ref++; 13801 } 13802 13803 hasprovs = 1; 13804 } 13805 13806 mutex_exit(&dtrace_lock); 13807 13808 if (hasprovs) 13809 dtrace_helper_provider_register(to, newhelp, NULL); 13810 } 13811 13812 /* 13813 * DTrace Hook Functions 13814 */ 13815 static void 13816 dtrace_module_loaded(struct modctl *ctl) 13817 { 13818 dtrace_provider_t *prv; 13819 13820 mutex_enter(&dtrace_provider_lock); 13821 mutex_enter(&mod_lock); 13822 13823 ASSERT(ctl->mod_busy); 13824 13825 /* 13826 * We're going to call each providers per-module provide operation 13827 * specifying only this module. 13828 */ 13829 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 13830 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 13831 13832 mutex_exit(&mod_lock); 13833 mutex_exit(&dtrace_provider_lock); 13834 13835 /* 13836 * If we have any retained enablings, we need to match against them. 13837 * Enabling probes requires that cpu_lock be held, and we cannot hold 13838 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 13839 * module. (In particular, this happens when loading scheduling 13840 * classes.) So if we have any retained enablings, we need to dispatch 13841 * our task queue to do the match for us. 13842 */ 13843 mutex_enter(&dtrace_lock); 13844 13845 if (dtrace_retained == NULL) { 13846 mutex_exit(&dtrace_lock); 13847 return; 13848 } 13849 13850 (void) taskq_dispatch(dtrace_taskq, 13851 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 13852 13853 mutex_exit(&dtrace_lock); 13854 13855 /* 13856 * And now, for a little heuristic sleaze: in general, we want to 13857 * match modules as soon as they load. However, we cannot guarantee 13858 * this, because it would lead us to the lock ordering violation 13859 * outlined above. The common case, of course, is that cpu_lock is 13860 * _not_ held -- so we delay here for a clock tick, hoping that that's 13861 * long enough for the task queue to do its work. If it's not, it's 13862 * not a serious problem -- it just means that the module that we 13863 * just loaded may not be immediately instrumentable. 13864 */ 13865 delay(1); 13866 } 13867 13868 static void 13869 dtrace_module_unloaded(struct modctl *ctl) 13870 { 13871 dtrace_probe_t template, *probe, *first, *next; 13872 dtrace_provider_t *prov; 13873 13874 template.dtpr_mod = ctl->mod_modname; 13875 13876 mutex_enter(&dtrace_provider_lock); 13877 mutex_enter(&mod_lock); 13878 mutex_enter(&dtrace_lock); 13879 13880 if (dtrace_bymod == NULL) { 13881 /* 13882 * The DTrace module is loaded (obviously) but not attached; 13883 * we don't have any work to do. 13884 */ 13885 mutex_exit(&dtrace_provider_lock); 13886 mutex_exit(&mod_lock); 13887 mutex_exit(&dtrace_lock); 13888 return; 13889 } 13890 13891 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 13892 probe != NULL; probe = probe->dtpr_nextmod) { 13893 if (probe->dtpr_ecb != NULL) { 13894 mutex_exit(&dtrace_provider_lock); 13895 mutex_exit(&mod_lock); 13896 mutex_exit(&dtrace_lock); 13897 13898 /* 13899 * This shouldn't _actually_ be possible -- we're 13900 * unloading a module that has an enabled probe in it. 
13901 * (It's normally up to the provider to make sure that 13902 * this can't happen.) However, because dtps_enable() 13903 * doesn't have a failure mode, there can be an 13904 * enable/unload race. Upshot: we don't want to 13905 * assert, but we're not going to disable the 13906 * probe, either. 13907 */ 13908 if (dtrace_err_verbose) { 13909 cmn_err(CE_WARN, "unloaded module '%s' had " 13910 "enabled probes", ctl->mod_modname); 13911 } 13912 13913 return; 13914 } 13915 } 13916 13917 probe = first; 13918 13919 for (first = NULL; probe != NULL; probe = next) { 13920 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 13921 13922 dtrace_probes[probe->dtpr_id - 1] = NULL; 13923 13924 next = probe->dtpr_nextmod; 13925 dtrace_hash_remove(dtrace_bymod, probe); 13926 dtrace_hash_remove(dtrace_byfunc, probe); 13927 dtrace_hash_remove(dtrace_byname, probe); 13928 13929 if (first == NULL) { 13930 first = probe; 13931 probe->dtpr_nextmod = NULL; 13932 } else { 13933 probe->dtpr_nextmod = first; 13934 first = probe; 13935 } 13936 } 13937 13938 /* 13939 * We've removed all of the module's probes from the hash chains and 13940 * from the probe array. Now issue a dtrace_sync() to be sure that 13941 * everyone has cleared out from any probe array processing. 13942 */ 13943 dtrace_sync(); 13944 13945 for (probe = first; probe != NULL; probe = first) { 13946 first = probe->dtpr_nextmod; 13947 prov = probe->dtpr_provider; 13948 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 13949 probe->dtpr_arg); 13950 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 13951 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 13952 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 13953 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 13954 kmem_free(probe, sizeof (dtrace_probe_t)); 13955 } 13956 13957 mutex_exit(&dtrace_lock); 13958 mutex_exit(&mod_lock); 13959 mutex_exit(&dtrace_provider_lock); 13960 } 13961 13962 void 13963 dtrace_suspend(void) 13964 { 13965 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 13966 } 13967 13968 void 13969 dtrace_resume(void) 13970 { 13971 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 13972 } 13973 13974 static int 13975 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 13976 { 13977 ASSERT(MUTEX_HELD(&cpu_lock)); 13978 mutex_enter(&dtrace_lock); 13979 13980 switch (what) { 13981 case CPU_CONFIG: { 13982 dtrace_state_t *state; 13983 dtrace_optval_t *opt, rs, c; 13984 13985 /* 13986 * For now, we only allocate a new buffer for anonymous state. 13987 */ 13988 if ((state = dtrace_anon.dta_state) == NULL) 13989 break; 13990 13991 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13992 break; 13993 13994 opt = state->dts_options; 13995 c = opt[DTRACEOPT_CPU]; 13996 13997 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 13998 break; 13999 14000 /* 14001 * Regardless of what the actual policy is, we're going to 14002 * temporarily set our resize policy to be manual. We're 14003 * also going to temporarily set our CPU option to denote 14004 * the newly configured CPU. 14005 */ 14006 rs = opt[DTRACEOPT_BUFRESIZE]; 14007 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14008 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14009 14010 (void) dtrace_state_buffers(state); 14011 14012 opt[DTRACEOPT_BUFRESIZE] = rs; 14013 opt[DTRACEOPT_CPU] = c; 14014 14015 break; 14016 } 14017 14018 case CPU_UNCONFIG: 14019 /* 14020 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 14021 * buffer will be freed when the consumer exits.) 14022 */ 14023 break; 14024 14025 default: 14026 break; 14027 } 14028 14029 mutex_exit(&dtrace_lock); 14030 return (0); 14031 } 14032 14033 static void 14034 dtrace_cpu_setup_initial(processorid_t cpu) 14035 { 14036 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 14037 } 14038 14039 static void 14040 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 14041 { 14042 if (dtrace_toxranges >= dtrace_toxranges_max) { 14043 int osize, nsize; 14044 dtrace_toxrange_t *range; 14045 14046 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14047 14048 if (osize == 0) { 14049 ASSERT(dtrace_toxrange == NULL); 14050 ASSERT(dtrace_toxranges_max == 0); 14051 dtrace_toxranges_max = 1; 14052 } else { 14053 dtrace_toxranges_max <<= 1; 14054 } 14055 14056 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14057 range = kmem_zalloc(nsize, KM_SLEEP); 14058 14059 if (dtrace_toxrange != NULL) { 14060 ASSERT(osize != 0); 14061 bcopy(dtrace_toxrange, range, osize); 14062 kmem_free(dtrace_toxrange, osize); 14063 } 14064 14065 dtrace_toxrange = range; 14066 } 14067 14068 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 14069 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 14070 14071 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 14072 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 14073 dtrace_toxranges++; 14074 } 14075 14076 /* 14077 * DTrace Driver Cookbook Functions 14078 */ 14079 /*ARGSUSED*/ 14080 static int 14081 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 14082 { 14083 dtrace_provider_id_t id; 14084 dtrace_state_t *state = NULL; 14085 dtrace_enabling_t *enab; 14086 14087 mutex_enter(&cpu_lock); 14088 mutex_enter(&dtrace_provider_lock); 14089 mutex_enter(&dtrace_lock); 14090 14091 if (ddi_soft_state_init(&dtrace_softstate, 14092 sizeof (dtrace_state_t), 0) != 0) { 14093 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 14094 mutex_exit(&cpu_lock); 14095 mutex_exit(&dtrace_provider_lock); 14096 mutex_exit(&dtrace_lock); 14097 return (DDI_FAILURE); 14098 } 14099 14100 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 14101 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 14102 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 14103 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 14104 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 14105 ddi_remove_minor_node(devi, NULL); 14106 ddi_soft_state_fini(&dtrace_softstate); 14107 mutex_exit(&cpu_lock); 14108 mutex_exit(&dtrace_provider_lock); 14109 mutex_exit(&dtrace_lock); 14110 return (DDI_FAILURE); 14111 } 14112 14113 ddi_report_dev(devi); 14114 dtrace_devi = devi; 14115 14116 dtrace_modload = dtrace_module_loaded; 14117 dtrace_modunload = dtrace_module_unloaded; 14118 dtrace_cpu_init = dtrace_cpu_setup_initial; 14119 dtrace_helpers_cleanup = dtrace_helpers_destroy; 14120 dtrace_helpers_fork = dtrace_helpers_duplicate; 14121 dtrace_cpustart_init = dtrace_suspend; 14122 dtrace_cpustart_fini = dtrace_resume; 14123 dtrace_debugger_init = dtrace_suspend; 14124 dtrace_debugger_fini = dtrace_resume; 14125 14126 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 14127 14128 ASSERT(MUTEX_HELD(&cpu_lock)); 14129 14130 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 14131 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14132 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 14133 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 14134 VM_SLEEP | 
VMC_IDENTIFIER); 14135 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 14136 1, INT_MAX, 0); 14137 14138 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 14139 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 14140 NULL, NULL, NULL, NULL, NULL, 0); 14141 14142 ASSERT(MUTEX_HELD(&cpu_lock)); 14143 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 14144 offsetof(dtrace_probe_t, dtpr_nextmod), 14145 offsetof(dtrace_probe_t, dtpr_prevmod)); 14146 14147 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 14148 offsetof(dtrace_probe_t, dtpr_nextfunc), 14149 offsetof(dtrace_probe_t, dtpr_prevfunc)); 14150 14151 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 14152 offsetof(dtrace_probe_t, dtpr_nextname), 14153 offsetof(dtrace_probe_t, dtpr_prevname)); 14154 14155 if (dtrace_retain_max < 1) { 14156 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 14157 "setting to 1", dtrace_retain_max); 14158 dtrace_retain_max = 1; 14159 } 14160 14161 /* 14162 * Now discover our toxic ranges. 14163 */ 14164 dtrace_toxic_ranges(dtrace_toxrange_add); 14165 14166 /* 14167 * Before we register ourselves as a provider to our own framework, 14168 * we would like to assert that dtrace_provider is NULL -- but that's 14169 * not true if we were loaded as a dependency of a DTrace provider. 14170 * Once we've registered, we can assert that dtrace_provider is our 14171 * pseudo provider. 14172 */ 14173 (void) dtrace_register("dtrace", &dtrace_provider_attr, 14174 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 14175 14176 ASSERT(dtrace_provider != NULL); 14177 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 14178 14179 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 14180 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 14181 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 14182 dtrace_provider, NULL, NULL, "END", 0, NULL); 14183 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 14184 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 14185 14186 dtrace_anon_property(); 14187 mutex_exit(&cpu_lock); 14188 14189 /* 14190 * If DTrace helper tracing is enabled, we need to allocate the 14191 * trace buffer and initialize the values. 14192 */ 14193 if (dtrace_helptrace_enabled) { 14194 ASSERT(dtrace_helptrace_buffer == NULL); 14195 dtrace_helptrace_buffer = 14196 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 14197 dtrace_helptrace_next = 0; 14198 } 14199 14200 /* 14201 * If there are already providers, we must ask them to provide their 14202 * probes, and then match any anonymous enabling against them. Note 14203 * that there should be no other retained enablings at this time: 14204 * the only retained enablings at this time should be the anonymous 14205 * enabling. 14206 */ 14207 if (dtrace_anon.dta_enabling != NULL) { 14208 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 14209 14210 dtrace_enabling_provide(NULL); 14211 state = dtrace_anon.dta_state; 14212 14213 /* 14214 * We couldn't hold cpu_lock across the above call to 14215 * dtrace_enabling_provide(), but we must hold it to actually 14216 * enable the probes. We have to drop all of our locks, pick 14217 * up cpu_lock, and regain our locks before matching the 14218 * retained anonymous enabling. 
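 *
 * (The reacquisition below follows the same lock order used at the top
 * of dtrace_attach(): cpu_lock first, then dtrace_provider_lock, then
 * dtrace_lock.)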
14219 */ 14220 mutex_exit(&dtrace_lock); 14221 mutex_exit(&dtrace_provider_lock); 14222 14223 mutex_enter(&cpu_lock); 14224 mutex_enter(&dtrace_provider_lock); 14225 mutex_enter(&dtrace_lock); 14226 14227 if ((enab = dtrace_anon.dta_enabling) != NULL) 14228 (void) dtrace_enabling_match(enab, NULL); 14229 14230 mutex_exit(&cpu_lock); 14231 } 14232 14233 mutex_exit(&dtrace_lock); 14234 mutex_exit(&dtrace_provider_lock); 14235 14236 if (state != NULL) { 14237 /* 14238 * If we created any anonymous state, set it going now. 14239 */ 14240 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 14241 } 14242 14243 return (DDI_SUCCESS); 14244 } 14245 14246 /*ARGSUSED*/ 14247 static int 14248 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 14249 { 14250 dtrace_state_t *state; 14251 uint32_t priv; 14252 uid_t uid; 14253 zoneid_t zoneid; 14254 14255 if (getminor(*devp) == DTRACEMNRN_HELPER) 14256 return (0); 14257 14258 /* 14259 * If this wasn't an open with the "helper" minor, then it must be 14260 * the "dtrace" minor. 14261 */ 14262 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 14263 14264 /* 14265 * If no DTRACE_PRIV_* bits are set in the credential, then the 14266 * caller lacks sufficient permission to do anything with DTrace. 14267 */ 14268 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 14269 if (priv == DTRACE_PRIV_NONE) 14270 return (EACCES); 14271 14272 /* 14273 * Ask all providers to provide all their probes. 14274 */ 14275 mutex_enter(&dtrace_provider_lock); 14276 dtrace_probe_provide(NULL, NULL); 14277 mutex_exit(&dtrace_provider_lock); 14278 14279 mutex_enter(&cpu_lock); 14280 mutex_enter(&dtrace_lock); 14281 dtrace_opens++; 14282 dtrace_membar_producer(); 14283 14284 /* 14285 * If the kernel debugger is active (that is, if the kernel debugger 14286 * modified text in some way), we won't allow the open. 14287 */ 14288 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14289 dtrace_opens--; 14290 mutex_exit(&cpu_lock); 14291 mutex_exit(&dtrace_lock); 14292 return (EBUSY); 14293 } 14294 14295 state = dtrace_state_create(devp, cred_p); 14296 mutex_exit(&cpu_lock); 14297 14298 if (state == NULL) { 14299 if (--dtrace_opens == 0) 14300 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14301 mutex_exit(&dtrace_lock); 14302 return (EAGAIN); 14303 } 14304 14305 mutex_exit(&dtrace_lock); 14306 14307 return (0); 14308 } 14309 14310 /*ARGSUSED*/ 14311 static int 14312 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 14313 { 14314 minor_t minor = getminor(dev); 14315 dtrace_state_t *state; 14316 14317 if (minor == DTRACEMNRN_HELPER) 14318 return (0); 14319 14320 state = ddi_get_soft_state(dtrace_softstate, minor); 14321 14322 mutex_enter(&cpu_lock); 14323 mutex_enter(&dtrace_lock); 14324 14325 if (state->dts_anon) { 14326 /* 14327 * There is anonymous state. Destroy that first. 
14328 */ 14329 ASSERT(dtrace_anon.dta_state == NULL); 14330 dtrace_state_destroy(state->dts_anon); 14331 } 14332 14333 dtrace_state_destroy(state); 14334 ASSERT(dtrace_opens > 0); 14335 if (--dtrace_opens == 0) 14336 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14337 14338 mutex_exit(&dtrace_lock); 14339 mutex_exit(&cpu_lock); 14340 14341 return (0); 14342 } 14343 14344 /*ARGSUSED*/ 14345 static int 14346 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 14347 { 14348 int rval; 14349 dof_helper_t help, *dhp = NULL; 14350 14351 switch (cmd) { 14352 case DTRACEHIOC_ADDDOF: 14353 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 14354 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 14355 return (EFAULT); 14356 } 14357 14358 dhp = &help; 14359 arg = (intptr_t)help.dofhp_dof; 14360 /*FALLTHROUGH*/ 14361 14362 case DTRACEHIOC_ADD: { 14363 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 14364 14365 if (dof == NULL) 14366 return (rval); 14367 14368 mutex_enter(&dtrace_lock); 14369 14370 /* 14371 * dtrace_helper_slurp() takes responsibility for the dof -- 14372 * it may free it now or it may save it and free it later. 14373 */ 14374 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 14375 *rv = rval; 14376 rval = 0; 14377 } else { 14378 rval = EINVAL; 14379 } 14380 14381 mutex_exit(&dtrace_lock); 14382 return (rval); 14383 } 14384 14385 case DTRACEHIOC_REMOVE: { 14386 mutex_enter(&dtrace_lock); 14387 rval = dtrace_helper_destroygen(arg); 14388 mutex_exit(&dtrace_lock); 14389 14390 return (rval); 14391 } 14392 14393 default: 14394 break; 14395 } 14396 14397 return (ENOTTY); 14398 } 14399 14400 /*ARGSUSED*/ 14401 static int 14402 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 14403 { 14404 minor_t minor = getminor(dev); 14405 dtrace_state_t *state; 14406 int rval; 14407 14408 if (minor == DTRACEMNRN_HELPER) 14409 return (dtrace_ioctl_helper(cmd, arg, rv)); 14410 14411 state = ddi_get_soft_state(dtrace_softstate, minor); 14412 14413 if (state->dts_anon) { 14414 ASSERT(dtrace_anon.dta_state == NULL); 14415 state = state->dts_anon; 14416 } 14417 14418 switch (cmd) { 14419 case DTRACEIOC_PROVIDER: { 14420 dtrace_providerdesc_t pvd; 14421 dtrace_provider_t *pvp; 14422 14423 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 14424 return (EFAULT); 14425 14426 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 14427 mutex_enter(&dtrace_provider_lock); 14428 14429 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 14430 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 14431 break; 14432 } 14433 14434 mutex_exit(&dtrace_provider_lock); 14435 14436 if (pvp == NULL) 14437 return (ESRCH); 14438 14439 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 14440 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 14441 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 14442 return (EFAULT); 14443 14444 return (0); 14445 } 14446 14447 case DTRACEIOC_EPROBE: { 14448 dtrace_eprobedesc_t epdesc; 14449 dtrace_ecb_t *ecb; 14450 dtrace_action_t *act; 14451 void *buf; 14452 size_t size; 14453 uintptr_t dest; 14454 int nrecs; 14455 14456 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 14457 return (EFAULT); 14458 14459 mutex_enter(&dtrace_lock); 14460 14461 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 14462 mutex_exit(&dtrace_lock); 14463 return (EINVAL); 14464 } 14465 14466 if (ecb->dte_probe == NULL) { 14467 mutex_exit(&dtrace_lock); 14468 return (EINVAL); 14469 } 14470 14471 epdesc.dtepd_probeid = 
ecb->dte_probe->dtpr_id; 14472 epdesc.dtepd_uarg = ecb->dte_uarg; 14473 epdesc.dtepd_size = ecb->dte_size; 14474 14475 nrecs = epdesc.dtepd_nrecs; 14476 epdesc.dtepd_nrecs = 0; 14477 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14478 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14479 continue; 14480 14481 epdesc.dtepd_nrecs++; 14482 } 14483 14484 /* 14485 * Now that we have the size, we need to allocate a temporary 14486 * buffer in which to store the complete description. We need 14487 * the temporary buffer to be able to drop dtrace_lock() 14488 * across the copyout(), below. 14489 */ 14490 size = sizeof (dtrace_eprobedesc_t) + 14491 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 14492 14493 buf = kmem_alloc(size, KM_SLEEP); 14494 dest = (uintptr_t)buf; 14495 14496 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 14497 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 14498 14499 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14500 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14501 continue; 14502 14503 if (nrecs-- == 0) 14504 break; 14505 14506 bcopy(&act->dta_rec, (void *)dest, 14507 sizeof (dtrace_recdesc_t)); 14508 dest += sizeof (dtrace_recdesc_t); 14509 } 14510 14511 mutex_exit(&dtrace_lock); 14512 14513 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14514 kmem_free(buf, size); 14515 return (EFAULT); 14516 } 14517 14518 kmem_free(buf, size); 14519 return (0); 14520 } 14521 14522 case DTRACEIOC_AGGDESC: { 14523 dtrace_aggdesc_t aggdesc; 14524 dtrace_action_t *act; 14525 dtrace_aggregation_t *agg; 14526 int nrecs; 14527 uint32_t offs; 14528 dtrace_recdesc_t *lrec; 14529 void *buf; 14530 size_t size; 14531 uintptr_t dest; 14532 14533 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 14534 return (EFAULT); 14535 14536 mutex_enter(&dtrace_lock); 14537 14538 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 14539 mutex_exit(&dtrace_lock); 14540 return (EINVAL); 14541 } 14542 14543 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 14544 14545 nrecs = aggdesc.dtagd_nrecs; 14546 aggdesc.dtagd_nrecs = 0; 14547 14548 offs = agg->dtag_base; 14549 lrec = &agg->dtag_action.dta_rec; 14550 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 14551 14552 for (act = agg->dtag_first; ; act = act->dta_next) { 14553 ASSERT(act->dta_intuple || 14554 DTRACEACT_ISAGG(act->dta_kind)); 14555 14556 /* 14557 * If this action has a record size of zero, it 14558 * denotes an argument to the aggregating action. 14559 * Because the presence of this record doesn't (or 14560 * shouldn't) affect the way the data is interpreted, 14561 * we don't copy it out to save user-level the 14562 * confusion of dealing with a zero-length record. 14563 */ 14564 if (act->dta_rec.dtrd_size == 0) { 14565 ASSERT(agg->dtag_hasarg); 14566 continue; 14567 } 14568 14569 aggdesc.dtagd_nrecs++; 14570 14571 if (act == &agg->dtag_action) 14572 break; 14573 } 14574 14575 /* 14576 * Now that we have the size, we need to allocate a temporary 14577 * buffer in which to store the complete description. We need 14578 * the temporary buffer to be able to drop dtrace_lock() 14579 * across the copyout(), below. 
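 *
 * (As in the DTRACEIOC_EPROBE case above, copyout() may block while
 * faulting in the user buffer, so the description is staged in kernel
 * memory and copied out only after dtrace_lock has been dropped.)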
14580 */ 14581 size = sizeof (dtrace_aggdesc_t) + 14582 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 14583 14584 buf = kmem_alloc(size, KM_SLEEP); 14585 dest = (uintptr_t)buf; 14586 14587 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 14588 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 14589 14590 for (act = agg->dtag_first; ; act = act->dta_next) { 14591 dtrace_recdesc_t rec = act->dta_rec; 14592 14593 /* 14594 * See the comment in the above loop for why we pass 14595 * over zero-length records. 14596 */ 14597 if (rec.dtrd_size == 0) { 14598 ASSERT(agg->dtag_hasarg); 14599 continue; 14600 } 14601 14602 if (nrecs-- == 0) 14603 break; 14604 14605 rec.dtrd_offset -= offs; 14606 bcopy(&rec, (void *)dest, sizeof (rec)); 14607 dest += sizeof (dtrace_recdesc_t); 14608 14609 if (act == &agg->dtag_action) 14610 break; 14611 } 14612 14613 mutex_exit(&dtrace_lock); 14614 14615 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14616 kmem_free(buf, size); 14617 return (EFAULT); 14618 } 14619 14620 kmem_free(buf, size); 14621 return (0); 14622 } 14623 14624 case DTRACEIOC_ENABLE: { 14625 dof_hdr_t *dof; 14626 dtrace_enabling_t *enab = NULL; 14627 dtrace_vstate_t *vstate; 14628 int err = 0; 14629 14630 *rv = 0; 14631 14632 /* 14633 * If a NULL argument has been passed, we take this as our 14634 * cue to reevaluate our enablings. 14635 */ 14636 if (arg == NULL) { 14637 mutex_enter(&cpu_lock); 14638 mutex_enter(&dtrace_lock); 14639 err = dtrace_enabling_matchstate(state, rv); 14640 mutex_exit(&dtrace_lock); 14641 mutex_exit(&cpu_lock); 14642 14643 return (err); 14644 } 14645 14646 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 14647 return (rval); 14648 14649 mutex_enter(&cpu_lock); 14650 mutex_enter(&dtrace_lock); 14651 vstate = &state->dts_vstate; 14652 14653 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14654 mutex_exit(&dtrace_lock); 14655 mutex_exit(&cpu_lock); 14656 dtrace_dof_destroy(dof); 14657 return (EBUSY); 14658 } 14659 14660 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 14661 mutex_exit(&dtrace_lock); 14662 mutex_exit(&cpu_lock); 14663 dtrace_dof_destroy(dof); 14664 return (EINVAL); 14665 } 14666 14667 if ((rval = dtrace_dof_options(dof, state)) != 0) { 14668 dtrace_enabling_destroy(enab); 14669 mutex_exit(&dtrace_lock); 14670 mutex_exit(&cpu_lock); 14671 dtrace_dof_destroy(dof); 14672 return (rval); 14673 } 14674 14675 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 14676 err = dtrace_enabling_retain(enab); 14677 } else { 14678 dtrace_enabling_destroy(enab); 14679 } 14680 14681 mutex_exit(&cpu_lock); 14682 mutex_exit(&dtrace_lock); 14683 dtrace_dof_destroy(dof); 14684 14685 return (err); 14686 } 14687 14688 case DTRACEIOC_REPLICATE: { 14689 dtrace_repldesc_t desc; 14690 dtrace_probedesc_t *match = &desc.dtrpd_match; 14691 dtrace_probedesc_t *create = &desc.dtrpd_create; 14692 int err; 14693 14694 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14695 return (EFAULT); 14696 14697 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14698 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14699 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14700 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14701 14702 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14703 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14704 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14705 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14706 14707 mutex_enter(&dtrace_lock); 14708 err = dtrace_enabling_replicate(state, match, create); 14709 
mutex_exit(&dtrace_lock); 14710 14711 return (err); 14712 } 14713 14714 case DTRACEIOC_PROBEMATCH: 14715 case DTRACEIOC_PROBES: { 14716 dtrace_probe_t *probe = NULL; 14717 dtrace_probedesc_t desc; 14718 dtrace_probekey_t pkey; 14719 dtrace_id_t i; 14720 int m = 0; 14721 uint32_t priv; 14722 uid_t uid; 14723 zoneid_t zoneid; 14724 14725 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14726 return (EFAULT); 14727 14728 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14729 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14730 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14731 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14732 14733 /* 14734 * Before we attempt to match this probe, we want to give 14735 * all providers the opportunity to provide it. 14736 */ 14737 if (desc.dtpd_id == DTRACE_IDNONE) { 14738 mutex_enter(&dtrace_provider_lock); 14739 dtrace_probe_provide(&desc, NULL); 14740 mutex_exit(&dtrace_provider_lock); 14741 desc.dtpd_id++; 14742 } 14743 14744 if (cmd == DTRACEIOC_PROBEMATCH) { 14745 dtrace_probekey(&desc, &pkey); 14746 pkey.dtpk_id = DTRACE_IDNONE; 14747 } 14748 14749 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 14750 14751 mutex_enter(&dtrace_lock); 14752 14753 if (cmd == DTRACEIOC_PROBEMATCH) { 14754 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 14755 if ((probe = dtrace_probes[i - 1]) != NULL && 14756 (m = dtrace_match_probe(probe, &pkey, 14757 priv, uid, zoneid)) != 0) 14758 break; 14759 } 14760 14761 if (m < 0) { 14762 mutex_exit(&dtrace_lock); 14763 return (EINVAL); 14764 } 14765 14766 } else { 14767 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 14768 if ((probe = dtrace_probes[i - 1]) != NULL && 14769 dtrace_match_priv(probe, priv, uid, zoneid)) 14770 break; 14771 } 14772 } 14773 14774 if (probe == NULL) { 14775 mutex_exit(&dtrace_lock); 14776 return (ESRCH); 14777 } 14778 14779 dtrace_probe_description(probe, &desc); 14780 mutex_exit(&dtrace_lock); 14781 14782 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14783 return (EFAULT); 14784 14785 return (0); 14786 } 14787 14788 case DTRACEIOC_PROBEARG: { 14789 dtrace_argdesc_t desc; 14790 dtrace_probe_t *probe; 14791 dtrace_provider_t *prov; 14792 14793 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14794 return (EFAULT); 14795 14796 if (desc.dtargd_id == DTRACE_IDNONE) 14797 return (EINVAL); 14798 14799 if (desc.dtargd_ndx == DTRACE_ARGNONE) 14800 return (EINVAL); 14801 14802 mutex_enter(&dtrace_provider_lock); 14803 mutex_enter(&mod_lock); 14804 mutex_enter(&dtrace_lock); 14805 14806 if (desc.dtargd_id > dtrace_nprobes) { 14807 mutex_exit(&dtrace_lock); 14808 mutex_exit(&mod_lock); 14809 mutex_exit(&dtrace_provider_lock); 14810 return (EINVAL); 14811 } 14812 14813 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 14814 mutex_exit(&dtrace_lock); 14815 mutex_exit(&mod_lock); 14816 mutex_exit(&dtrace_provider_lock); 14817 return (EINVAL); 14818 } 14819 14820 mutex_exit(&dtrace_lock); 14821 14822 prov = probe->dtpr_provider; 14823 14824 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 14825 /* 14826 * There isn't any typed information for this probe. 14827 * Set the argument number to DTRACE_ARGNONE. 
14828 */ 14829 desc.dtargd_ndx = DTRACE_ARGNONE; 14830 } else { 14831 desc.dtargd_native[0] = '\0'; 14832 desc.dtargd_xlate[0] = '\0'; 14833 desc.dtargd_mapping = desc.dtargd_ndx; 14834 14835 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 14836 probe->dtpr_id, probe->dtpr_arg, &desc); 14837 } 14838 14839 mutex_exit(&mod_lock); 14840 mutex_exit(&dtrace_provider_lock); 14841 14842 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14843 return (EFAULT); 14844 14845 return (0); 14846 } 14847 14848 case DTRACEIOC_GO: { 14849 processorid_t cpuid; 14850 rval = dtrace_state_go(state, &cpuid); 14851 14852 if (rval != 0) 14853 return (rval); 14854 14855 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14856 return (EFAULT); 14857 14858 return (0); 14859 } 14860 14861 case DTRACEIOC_STOP: { 14862 processorid_t cpuid; 14863 14864 mutex_enter(&dtrace_lock); 14865 rval = dtrace_state_stop(state, &cpuid); 14866 mutex_exit(&dtrace_lock); 14867 14868 if (rval != 0) 14869 return (rval); 14870 14871 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14872 return (EFAULT); 14873 14874 return (0); 14875 } 14876 14877 case DTRACEIOC_DOFGET: { 14878 dof_hdr_t hdr, *dof; 14879 uint64_t len; 14880 14881 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 14882 return (EFAULT); 14883 14884 mutex_enter(&dtrace_lock); 14885 dof = dtrace_dof_create(state); 14886 mutex_exit(&dtrace_lock); 14887 14888 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 14889 rval = copyout(dof, (void *)arg, len); 14890 dtrace_dof_destroy(dof); 14891 14892 return (rval == 0 ? 0 : EFAULT); 14893 } 14894 14895 case DTRACEIOC_AGGSNAP: 14896 case DTRACEIOC_BUFSNAP: { 14897 dtrace_bufdesc_t desc; 14898 caddr_t cached; 14899 dtrace_buffer_t *buf; 14900 14901 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14902 return (EFAULT); 14903 14904 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 14905 return (EINVAL); 14906 14907 mutex_enter(&dtrace_lock); 14908 14909 if (cmd == DTRACEIOC_BUFSNAP) { 14910 buf = &state->dts_buffer[desc.dtbd_cpu]; 14911 } else { 14912 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 14913 } 14914 14915 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 14916 size_t sz = buf->dtb_offset; 14917 14918 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 14919 mutex_exit(&dtrace_lock); 14920 return (EBUSY); 14921 } 14922 14923 /* 14924 * If this buffer has already been consumed, we're 14925 * going to indicate that there's nothing left here 14926 * to consume. 14927 */ 14928 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 14929 mutex_exit(&dtrace_lock); 14930 14931 desc.dtbd_size = 0; 14932 desc.dtbd_drops = 0; 14933 desc.dtbd_errors = 0; 14934 desc.dtbd_oldest = 0; 14935 sz = sizeof (desc); 14936 14937 if (copyout(&desc, (void *)arg, sz) != 0) 14938 return (EFAULT); 14939 14940 return (0); 14941 } 14942 14943 /* 14944 * If this is a ring buffer that has wrapped, we want 14945 * to copy the whole thing out. 
14946 */ 14947 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 14948 dtrace_buffer_polish(buf); 14949 sz = buf->dtb_size; 14950 } 14951 14952 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 14953 mutex_exit(&dtrace_lock); 14954 return (EFAULT); 14955 } 14956 14957 desc.dtbd_size = sz; 14958 desc.dtbd_drops = buf->dtb_drops; 14959 desc.dtbd_errors = buf->dtb_errors; 14960 desc.dtbd_oldest = buf->dtb_xamot_offset; 14961 14962 mutex_exit(&dtrace_lock); 14963 14964 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14965 return (EFAULT); 14966 14967 buf->dtb_flags |= DTRACEBUF_CONSUMED; 14968 14969 return (0); 14970 } 14971 14972 if (buf->dtb_tomax == NULL) { 14973 ASSERT(buf->dtb_xamot == NULL); 14974 mutex_exit(&dtrace_lock); 14975 return (ENOENT); 14976 } 14977 14978 cached = buf->dtb_tomax; 14979 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 14980 14981 dtrace_xcall(desc.dtbd_cpu, 14982 (dtrace_xcall_t)dtrace_buffer_switch, buf); 14983 14984 state->dts_errors += buf->dtb_xamot_errors; 14985 14986 /* 14987 * If the buffers did not actually switch, then the cross call 14988 * did not take place -- presumably because the given CPU is 14989 * not in the ready set. If this is the case, we'll return 14990 * ENOENT. 14991 */ 14992 if (buf->dtb_tomax == cached) { 14993 ASSERT(buf->dtb_xamot != cached); 14994 mutex_exit(&dtrace_lock); 14995 return (ENOENT); 14996 } 14997 14998 ASSERT(cached == buf->dtb_xamot); 14999 15000 /* 15001 * We have our snapshot; now copy it out. 15002 */ 15003 if (copyout(buf->dtb_xamot, desc.dtbd_data, 15004 buf->dtb_xamot_offset) != 0) { 15005 mutex_exit(&dtrace_lock); 15006 return (EFAULT); 15007 } 15008 15009 desc.dtbd_size = buf->dtb_xamot_offset; 15010 desc.dtbd_drops = buf->dtb_xamot_drops; 15011 desc.dtbd_errors = buf->dtb_xamot_errors; 15012 desc.dtbd_oldest = 0; 15013 15014 mutex_exit(&dtrace_lock); 15015 15016 /* 15017 * Finally, copy out the buffer description. 15018 */ 15019 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15020 return (EFAULT); 15021 15022 return (0); 15023 } 15024 15025 case DTRACEIOC_CONF: { 15026 dtrace_conf_t conf; 15027 15028 bzero(&conf, sizeof (conf)); 15029 conf.dtc_difversion = DIF_VERSION; 15030 conf.dtc_difintregs = DIF_DIR_NREGS; 15031 conf.dtc_diftupregs = DIF_DTR_NREGS; 15032 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 15033 15034 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 15035 return (EFAULT); 15036 15037 return (0); 15038 } 15039 15040 case DTRACEIOC_STATUS: { 15041 dtrace_status_t stat; 15042 dtrace_dstate_t *dstate; 15043 int i, j; 15044 uint64_t nerrs; 15045 15046 /* 15047 * See the comment in dtrace_state_deadman() for the reason 15048 * for setting dts_laststatus to INT64_MAX before setting 15049 * it to the correct value. 
15050 */ 15051 state->dts_laststatus = INT64_MAX; 15052 dtrace_membar_producer(); 15053 state->dts_laststatus = dtrace_gethrtime(); 15054 15055 bzero(&stat, sizeof (stat)); 15056 15057 mutex_enter(&dtrace_lock); 15058 15059 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 15060 mutex_exit(&dtrace_lock); 15061 return (ENOENT); 15062 } 15063 15064 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 15065 stat.dtst_exiting = 1; 15066 15067 nerrs = state->dts_errors; 15068 dstate = &state->dts_vstate.dtvs_dynvars; 15069 15070 for (i = 0; i < NCPU; i++) { 15071 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 15072 15073 stat.dtst_dyndrops += dcpu->dtdsc_drops; 15074 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 15075 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 15076 15077 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 15078 stat.dtst_filled++; 15079 15080 nerrs += state->dts_buffer[i].dtb_errors; 15081 15082 for (j = 0; j < state->dts_nspeculations; j++) { 15083 dtrace_speculation_t *spec; 15084 dtrace_buffer_t *buf; 15085 15086 spec = &state->dts_speculations[j]; 15087 buf = &spec->dtsp_buffer[i]; 15088 stat.dtst_specdrops += buf->dtb_xamot_drops; 15089 } 15090 } 15091 15092 stat.dtst_specdrops_busy = state->dts_speculations_busy; 15093 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 15094 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 15095 stat.dtst_dblerrors = state->dts_dblerrors; 15096 stat.dtst_killed = 15097 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 15098 stat.dtst_errors = nerrs; 15099 15100 mutex_exit(&dtrace_lock); 15101 15102 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 15103 return (EFAULT); 15104 15105 return (0); 15106 } 15107 15108 case DTRACEIOC_FORMAT: { 15109 dtrace_fmtdesc_t fmt; 15110 char *str; 15111 int len; 15112 15113 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 15114 return (EFAULT); 15115 15116 mutex_enter(&dtrace_lock); 15117 15118 if (fmt.dtfd_format == 0 || 15119 fmt.dtfd_format > state->dts_nformats) { 15120 mutex_exit(&dtrace_lock); 15121 return (EINVAL); 15122 } 15123 15124 /* 15125 * Format strings are allocated contiguously and they are 15126 * never freed; if a format index is less than the number 15127 * of formats, we can assert that the format map is non-NULL 15128 * and that the format for the specified index is non-NULL. 
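 *
 * (Format indices are 1-based: a dtfd_format value of N refers to
 * dts_formats[N - 1], which is why a value of zero is rejected above.)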
15129 */ 15130 ASSERT(state->dts_formats != NULL); 15131 str = state->dts_formats[fmt.dtfd_format - 1]; 15132 ASSERT(str != NULL); 15133 15134 len = strlen(str) + 1; 15135 15136 if (len > fmt.dtfd_length) { 15137 fmt.dtfd_length = len; 15138 15139 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 15140 mutex_exit(&dtrace_lock); 15141 return (EINVAL); 15142 } 15143 } else { 15144 if (copyout(str, fmt.dtfd_string, len) != 0) { 15145 mutex_exit(&dtrace_lock); 15146 return (EINVAL); 15147 } 15148 } 15149 15150 mutex_exit(&dtrace_lock); 15151 return (0); 15152 } 15153 15154 default: 15155 break; 15156 } 15157 15158 return (ENOTTY); 15159 } 15160 15161 /*ARGSUSED*/ 15162 static int 15163 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 15164 { 15165 dtrace_state_t *state; 15166 15167 switch (cmd) { 15168 case DDI_DETACH: 15169 break; 15170 15171 case DDI_SUSPEND: 15172 return (DDI_SUCCESS); 15173 15174 default: 15175 return (DDI_FAILURE); 15176 } 15177 15178 mutex_enter(&cpu_lock); 15179 mutex_enter(&dtrace_provider_lock); 15180 mutex_enter(&dtrace_lock); 15181 15182 ASSERT(dtrace_opens == 0); 15183 15184 if (dtrace_helpers > 0) { 15185 mutex_exit(&dtrace_provider_lock); 15186 mutex_exit(&dtrace_lock); 15187 mutex_exit(&cpu_lock); 15188 return (DDI_FAILURE); 15189 } 15190 15191 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 15192 mutex_exit(&dtrace_provider_lock); 15193 mutex_exit(&dtrace_lock); 15194 mutex_exit(&cpu_lock); 15195 return (DDI_FAILURE); 15196 } 15197 15198 dtrace_provider = NULL; 15199 15200 if ((state = dtrace_anon_grab()) != NULL) { 15201 /* 15202 * If there were ECBs on this state, the provider should 15203 * have not been allowed to detach; assert that there is 15204 * none. 15205 */ 15206 ASSERT(state->dts_necbs == 0); 15207 dtrace_state_destroy(state); 15208 15209 /* 15210 * If we're being detached with anonymous state, we need to 15211 * indicate to the kernel debugger that DTrace is now inactive. 
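 *
 * (This is the same KDI_DTSET_DTRACE_DEACTIVATE notification that
 * dtrace_close() issues when the last ordinary consumer goes away.)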
15212 */ 15213 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15214 } 15215 15216 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 15217 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15218 dtrace_cpu_init = NULL; 15219 dtrace_helpers_cleanup = NULL; 15220 dtrace_helpers_fork = NULL; 15221 dtrace_cpustart_init = NULL; 15222 dtrace_cpustart_fini = NULL; 15223 dtrace_debugger_init = NULL; 15224 dtrace_debugger_fini = NULL; 15225 dtrace_modload = NULL; 15226 dtrace_modunload = NULL; 15227 15228 mutex_exit(&cpu_lock); 15229 15230 if (dtrace_helptrace_enabled) { 15231 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 15232 dtrace_helptrace_buffer = NULL; 15233 } 15234 15235 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 15236 dtrace_probes = NULL; 15237 dtrace_nprobes = 0; 15238 15239 dtrace_hash_destroy(dtrace_bymod); 15240 dtrace_hash_destroy(dtrace_byfunc); 15241 dtrace_hash_destroy(dtrace_byname); 15242 dtrace_bymod = NULL; 15243 dtrace_byfunc = NULL; 15244 dtrace_byname = NULL; 15245 15246 kmem_cache_destroy(dtrace_state_cache); 15247 vmem_destroy(dtrace_minor); 15248 vmem_destroy(dtrace_arena); 15249 15250 if (dtrace_toxrange != NULL) { 15251 kmem_free(dtrace_toxrange, 15252 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 15253 dtrace_toxrange = NULL; 15254 dtrace_toxranges = 0; 15255 dtrace_toxranges_max = 0; 15256 } 15257 15258 ddi_remove_minor_node(dtrace_devi, NULL); 15259 dtrace_devi = NULL; 15260 15261 ddi_soft_state_fini(&dtrace_softstate); 15262 15263 ASSERT(dtrace_vtime_references == 0); 15264 ASSERT(dtrace_opens == 0); 15265 ASSERT(dtrace_retained == NULL); 15266 15267 mutex_exit(&dtrace_lock); 15268 mutex_exit(&dtrace_provider_lock); 15269 15270 /* 15271 * We don't destroy the task queue until after we have dropped our 15272 * locks (taskq_destroy() may block on running tasks). To prevent 15273 * attempting to do work after we have effectively detached but before 15274 * the task queue has been destroyed, all tasks dispatched via the 15275 * task queue must check that DTrace is still attached before 15276 * performing any operation. 
15277 */ 15278 taskq_destroy(dtrace_taskq); 15279 dtrace_taskq = NULL; 15280 15281 return (DDI_SUCCESS); 15282 } 15283 15284 /*ARGSUSED*/ 15285 static int 15286 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 15287 { 15288 int error; 15289 15290 switch (infocmd) { 15291 case DDI_INFO_DEVT2DEVINFO: 15292 *result = (void *)dtrace_devi; 15293 error = DDI_SUCCESS; 15294 break; 15295 case DDI_INFO_DEVT2INSTANCE: 15296 *result = (void *)0; 15297 error = DDI_SUCCESS; 15298 break; 15299 default: 15300 error = DDI_FAILURE; 15301 } 15302 return (error); 15303 } 15304 15305 static struct cb_ops dtrace_cb_ops = { 15306 dtrace_open, /* open */ 15307 dtrace_close, /* close */ 15308 nulldev, /* strategy */ 15309 nulldev, /* print */ 15310 nodev, /* dump */ 15311 nodev, /* read */ 15312 nodev, /* write */ 15313 dtrace_ioctl, /* ioctl */ 15314 nodev, /* devmap */ 15315 nodev, /* mmap */ 15316 nodev, /* segmap */ 15317 nochpoll, /* poll */ 15318 ddi_prop_op, /* cb_prop_op */ 15319 0, /* streamtab */ 15320 D_NEW | D_MP /* Driver compatibility flag */ 15321 }; 15322 15323 static struct dev_ops dtrace_ops = { 15324 DEVO_REV, /* devo_rev */ 15325 0, /* refcnt */ 15326 dtrace_info, /* get_dev_info */ 15327 nulldev, /* identify */ 15328 nulldev, /* probe */ 15329 dtrace_attach, /* attach */ 15330 dtrace_detach, /* detach */ 15331 nodev, /* reset */ 15332 &dtrace_cb_ops, /* driver operations */ 15333 NULL, /* bus operations */ 15334 nodev /* dev power */ 15335 }; 15336 15337 static struct modldrv modldrv = { 15338 &mod_driverops, /* module type (this is a pseudo driver) */ 15339 "Dynamic Tracing", /* name of module */ 15340 &dtrace_ops, /* driver ops */ 15341 }; 15342 15343 static struct modlinkage modlinkage = { 15344 MODREV_1, 15345 (void *)&modldrv, 15346 NULL 15347 }; 15348 15349 int 15350 _init(void) 15351 { 15352 return (mod_install(&modlinkage)); 15353 } 15354 15355 int 15356 _info(struct modinfo *modinfop) 15357 { 15358 return (mod_info(&modlinkage, modinfop)); 15359 } 15360 15361 int 15362 _fini(void) 15363 { 15364 return (mod_remove(&modlinkage)); 15365 } 15366
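
/*
 * The ioctl interface implemented above is normally driven by libdtrace
 * rather than consumed directly. Purely as an illustrative sketch -- the
 * device path, the bufsize and process() placeholders, and the absence of
 * error handling are assumptions for illustration, not part of this
 * driver -- a minimal consumer of the dtrace minor node looks roughly
 * like this:
 *
 *	int fd = open("/dev/dtrace/dtrace", O_RDWR);
 *	processorid_t cpu;
 *	dtrace_bufdesc_t desc;
 *	char *data = malloc(bufsize);	(at least the in-kernel buffer size)
 *
 *	(push enablings in via DTRACEIOC_ENABLE, handing it a DOF image)
 *
 *	(void) ioctl(fd, DTRACEIOC_GO, &cpu);
 *
 *	for (;;) {
 *		bzero(&desc, sizeof (desc));
 *		desc.dtbd_cpu = 0;	(a real consumer iterates over CPUs)
 *		desc.dtbd_data = data;
 *
 *		if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == 0)
 *			process(data, desc.dtbd_size);
 *
 *		(sleep for the switch rate before snapshotting again)
 *	}
 */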