/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the
 * "DTrace [Group] Functions", allowing one to find each block by searching
 * forward on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
static dev_info_t	*dtrace_devi;		/* device info */
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static void		*dtrace_softstate;	/* softstate pointer */
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_state_t	*dtrace_state;		/* temporary variable */
static int		dtrace_err;		/* temporary variable */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
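/*
 * To make the ordering described above concrete, the following is a minimal
 * sketch (illustrative only; it is not called from anywhere in this file) of
 * a code path that legally takes every lock named above, assuming that it
 * begins with no DTrace locks held:
 */
#if 0
static void
dtrace_lock_order_sketch(void)
{
	mutex_enter(&dtrace_meta_lock);		/* outermost DTrace lock */
	mutex_enter(&cpu_lock);			/* between meta and the rest */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* innermost */

	/* ... manipulate framework state ... */

	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);
}
#endif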
/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
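/*
 * For illustration, a key built by DTRACE_TLS_THRKEY() could be decomposed
 * as follows (a sketch only; nothing in the framework does this, and it
 * assumes that t_did + DIF_VARIABLE_MAX fits in 61 bits):
 */
#if 0
	uint64_t key, intr, did;

	DTRACE_TLS_THRKEY(key);
	intr = key >> 61;			/* interrupt level bits */
	did = (key & (((uint64_t)1 << 61) - 1)) - DIF_VARIABLE_MAX;
	ASSERT(intr < (1 << 3) && did == curthread->t_did);
#endif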
#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (rval);							\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

static dtrace_probe_t	*dtrace_probe_lookup_id(dtrace_id_t id);
static void	dtrace_enabling_provide(dtrace_provider_t *);
static int	dtrace_enabling_match(dtrace_enabling_t *, int *);
static void	dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t	dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void	dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t	dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int	dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int	dtrace_ecb_create_enable(dtrace_probe_t *, void *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for loading each of a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (addr - svar->dtsv_data < svar->dtsv_size &&
		    addr + sz <= svar->dtsv_data + svar->dtsv_size)
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	uintptr_t a;
	size_t s;

	/*
	 * First, check to see if the address is in scratch space...
	 */
	a = mstate->dtms_scratch_base;
	s = mstate->dtms_scratch_size;

	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	a = (uintptr_t)vstate->dtvs_dynvars.dtds_base;
	s = vstate->dtvs_dynvars.dtds_size;
	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}
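/*
 * A note on the range checks above:  "addr - a < s" relies on unsigned
 * wraparound to collapse two comparisons into one.  A minimal sketch of the
 * reasoning, under the assumption that all quantities are unsigned:  if
 * addr < a, then (addr - a) wraps to a huge value and the test fails; if
 * addr >= a, then (addr - a) is the offset into the region, which must be
 * less than the region's size.  For example, with a = 0x1000 and s = 0x100:
 *
 *	addr = 0x0fff:	addr - a wraps to 0xfff...fff	-> reject
 *	addr = 0x1080:	addr - a == 0x80 < 0x100	-> accept
 *	addr = 0x1100:	addr - a == 0x100, not < s	-> reject
 *
 * The second clause ("addr + sz <= a + s") then assures that the end of the
 * proposed store does not run past the end of the region.
 */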
/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * lim parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

/*
 * These privilege checks should be used by actions and subroutines to
 * verify the credentials of the process that enabled the invoking ECB.
 */
static int
dtrace_priv_proc_common(dtrace_state_t *state)
{
	uid_t uid = state->dts_cred.dcr_uid;
	gid_t gid = state->dts_cred.dcr_gid;
	cred_t *cr;
	proc_t *proc;

	if ((cr = CRED()) != NULL &&
	    uid == cr->cr_uid &&
	    uid == cr->cr_ruid &&
	    uid == cr->cr_suid &&
	    gid == cr->cr_gid &&
	    gid == cr->cr_rgid &&
	    gid == cr->cr_sgid &&
	    (proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_DESTRUCTIVE)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
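/*
 * The expected usage pattern for the checks above is a silent early return:
 * the check sets the appropriate CPU_DTRACE_*PRIV flag itself, so callers
 * need not (and should not) record the failure.  A minimal sketch (not a
 * function in this file):
 */
#if 0
	if (!dtrace_priv_proc(state))
		return;		/* CPU_DTRACE_UPRIV has already been set */
#endif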
/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
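/*
 * A summary sketch of the lifecycle that the code above implements (the
 * authoritative description is in <sys/dtrace_impl.h>):
 *
 *	free list ----(dtrace_dynvar() ALLOC)-----> live on a hash chain
 *	hash chain ---(dtrace_dynvar() DEALLOC)---> dirty list
 *	dirty list ---(cleaner moves aside)-------> rinsing list
 *	rinsing list --(after dtrace_sync())------> clean list
 *	clean list ---(allocator, free exhausted)-> free list
 *
 * The rinsing list exists so that a chunk is never reused while some CPU
 * might still hold a pointer to it from before the deallocation:  the
 * dtrace_sync() assures that all CPUs have left probe context (and thus
 * dropped any such pointers) before the chunks are declared clean.
 */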
/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates, or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op)
{
	uint64_t hashval = 1;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = CPU->cpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^32) that our hashval
	 * comes out to be 0.  We rely on a zero hashval denoting a free
	 * element; if this actually happens, we set the hashval to 1.
	 */
	if (hashval == 0)
		hashval = 1;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start == NULL || start->dtdv_hashval != 0 ||
	    op != DTRACE_DYNVAR_DEALLOC);

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == 0) {
				/*
				 * We've gone off the rails.  Somewhere
				 * along the line, one of the members of this
				 * hash chain was deleted.  We could assert
				 * that either the dirty list or the rinsing
				 * list is non-NULL.  (The dtrace_sync() in
				 * dtrace_dynvar_clean() would validate this
				 * assertion.)
				 */
				ASSERT(op != DTRACE_DYNVAR_DEALLOC);
				goto top;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != 0);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now clear the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = 0;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == 0);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == 0);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = 0;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval < *oval)
		*oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i] += incr;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1] += incr;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0] += incr;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1] += incr;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1] += incr;
}
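/*
 * A worked example of the bucket selection above, assuming (hypothetically)
 * base = 0, step = 10 and levels = 10:
 *
 *	val = -5:	val < base			-> lquanta[0]
 *	val = 37:	level = (37 - 0) / 10 = 3	-> lquanta[4]
 *	val = 150:	level = 15 >= levels		-> lquanta[11]
 *
 * That is, bucket 0 is the underflow bucket, buckets 1 through levels cover
 * the linear range, and bucket levels + 1 is the overflow bucket.
 */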
/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics:  an increment
		 * value that defaults to 1 when not present.  If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to get an odd (or perchance,
		 * a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	/*
	 * Calculate the hash value based on the key.  Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm:  a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice.  The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
	 */
	for (i = sizeof (dtrace_aggid_t); i < size; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * Yes, the divide here is expensive.  If the cycle count here becomes
	 * prohibitive, we can do tricks to eliminate it.
	 */
	ndx = hashval % agb->dtagb_hashsize;

	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
		ASSERT((caddr_t)key >= tomax);
		ASSERT((caddr_t)key < tomax + buf->dtb_size);

		if (hashval != key->dtak_hashval || key->dtak_size != size)
			continue;

		kdata = key->dtak_data;
		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);

		for (i = sizeof (dtrace_aggid_t); i < size; i++) {
			if (kdata[i] != data[i])
				goto next;
		}

		if (action != key->dtak_action) {
			/*
			 * We are aggregating on the same value in the same
			 * aggregation with two different aggregating actions.
			 * (This should have been picked up in the compiler,
			 * so we may be dealing with errant or devious DIF.)
			 * This is an error condition; we indicate as much,
			 * and return.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return;
		}

		/*
		 * This is a hit:  we need to apply the aggregator to
		 * the value at this key.
		 */
		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
		return;
next:
		continue;
	}

	/*
	 * We didn't find it.  We need to allocate some zero-filled space,
	 * link it into the hash table appropriately, and apply the aggregator
	 * to the (zero-filled) value.
	 */
	offs = buf->dtb_offset;
	while (offs & (align - 1))
		offs += sizeof (uint32_t);

	/*
	 * If we don't have enough room to both allocate a new key _and_
	 * its associated data, increment the drop count and return.
	 */
	if ((uintptr_t)tomax + offs + fsize >
	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*CONSTCOND*/
	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
	agb->dtagb_free -= sizeof (dtrace_aggkey_t);

	key->dtak_data = kdata = tomax + offs;
	buf->dtb_offset = offs + fsize;

	/*
	 * Now copy the data across.
	 */
	*((dtrace_aggid_t *)kdata) = agg->dtag_id;

	for (i = sizeof (dtrace_aggid_t); i < size; i++)
		kdata[i] = data[i];

	for (i = size; i < fsize; i++)
		kdata[i] = 0;

	key->dtak_hashval = hashval;
	key->dtak_size = size;
	key->dtak_action = action;
	key->dtak_next = agb->dtagb_hash[ndx];
	agb->dtagb_hash[ndx] = key;

	/*
	 * Finally, apply the aggregator.
	 */
	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
}

/*
 * Given consumer state, this routine finds a speculation in the INACTIVE
 * state and transitions it into the ACTIVE state.  If there is no speculation
 * in the INACTIVE state, 0 is returned.  In this case, no error counter is
 * incremented -- it is up to the caller to take appropriate action.
 */
static int
dtrace_speculation(dtrace_state_t *state)
{
	int i = 0;
	dtrace_speculation_state_t current;
	uint32_t *stat = &state->dts_speculations_unavail, count;

	while (i < state->dts_nspeculations) {
		dtrace_speculation_t *spec = &state->dts_speculations[i];

		current = spec->dtsp_state;

		if (current != DTRACESPEC_INACTIVE) {
			if (current == DTRACESPEC_COMMITTINGMANY ||
			    current == DTRACESPEC_COMMITTING ||
			    current == DTRACESPEC_DISCARDING)
				stat = &state->dts_speculations_busy;
			i++;
			continue;
		}

		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    current, DTRACESPEC_ACTIVE) == current)
			return (i + 1);
	}

	/*
	 * We couldn't find a speculation.  If we found as much as a single
	 * busy speculation buffer, we'll attribute this failure as "busy"
	 * instead of "unavail".
	 */
	do {
		count = *stat;
	} while (dtrace_cas32(stat, count, count + 1) != count);

	return (0);
}

/*
 * This routine commits an active speculation.  If the specified speculation
 * is not in a valid state to perform a commit(), this routine will silently
 * do nothing.  The state of the specified speculation is transitioned
 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
 */
static void
dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_buffer_t *src, *dest;
	uintptr_t daddr, saddr, dlimit;
	dtrace_speculation_state_t current, new;
	intptr_t offs;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	src = &spec->dtsp_buffer[cpu];
	dest = &state->dts_buffer[cpu];

	do {
		current = spec->dtsp_state;

		if (current == DTRACESPEC_COMMITTINGMANY)
			break;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_COMMITTING:
			/*
			 * This is only possible if we are (a) commit()'ing
			 * without having done a prior speculate() on this CPU
			 * and (b) racing with another commit() on a different
			 * CPU.  There's nothing to do -- we just assert that
			 * our offset is 0.
			 */
			ASSERT(src->dtb_offset == 0);
			return;

		case DTRACESPEC_ACTIVE:
			new = DTRACESPEC_COMMITTING;
			break;

		case DTRACESPEC_ACTIVEONE:
			/*
			 * This speculation is active on one CPU.  If our
			 * buffer offset is non-zero, we know that the one CPU
			 * must be us.  Otherwise, we are committing on a
			 * different CPU from the speculate(), and we must
			 * rely on being asynchronously cleaned.
			 */
			if (src->dtb_offset != 0) {
				new = DTRACESPEC_COMMITTING;
				break;
			}
			/*FALLTHROUGH*/

		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_COMMITTINGMANY;
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	/*
	 * We have set the state to indicate that we are committing this
	 * speculation.  Now reserve the necessary space in the destination
	 * buffer.
	 */
	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
	    sizeof (uint64_t), state, NULL)) < 0) {
		dtrace_buffer_drop(dest);
		goto out;
	}

	/*
	 * We have the space; copy the buffer across.  (Note that this is a
	 * highly suboptimal bcopy(); in the unlikely event that this becomes
	 * a serious performance issue, a high-performance DTrace-specific
	 * bcopy() should obviously be invented.)
	 */
	daddr = (uintptr_t)dest->dtb_tomax + offs;
	dlimit = daddr + src->dtb_offset;
	saddr = (uintptr_t)src->dtb_tomax;

	/*
	 * First, the aligned portion.
	 */
	while (dlimit - daddr >= sizeof (uint64_t)) {
		*((uint64_t *)daddr) = *((uint64_t *)saddr);

		daddr += sizeof (uint64_t);
		saddr += sizeof (uint64_t);
	}

	/*
	 * Now any left-over bit...
	 */
	while (dlimit - daddr)
		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);

	/*
	 * Finally, commit the reserved space in the destination buffer.
	 */
	dest->dtb_offset = offs + src->dtb_offset;

out:
	/*
	 * If we're lucky enough to be the only active CPU on this speculation
	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
	 */
	if (current == DTRACESPEC_ACTIVE ||
	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);

		ASSERT(rval == DTRACESPEC_COMMITTING);
	}

	src->dtb_offset = 0;
	src->dtb_xamot_drops += src->dtb_drops;
	src->dtb_drops = 0;
}
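/*
 * From the consumer's perspective, the commit()/discard() machinery above is
 * driven by D code of roughly the following shape (an illustrative sketch
 * with hypothetical probe names, not part of this file; here, data is kept
 * only for calls that fail):
 *
 *	syscall::open:entry
 *	{
 *		self->spec = speculation();
 *	}
 *
 *	syscall::open:entry
 *	/self->spec/
 *	{
 *		speculate(self->spec);
 *		printf("%s", copyinstr(arg0));
 *	}
 *
 *	syscall::open:return
 *	/self->spec && errno != 0/
 *	{
 *		commit(self->spec);
 *		self->spec = 0;
 *	}
 *
 *	syscall::open:return
 *	/self->spec/
 *	{
 *		discard(self->spec);
 *		self->spec = 0;
 *	}
 */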
The state of the specified speculation is transitioned 1855 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 1856 */ 1857 static void 1858 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 1859 dtrace_specid_t which) 1860 { 1861 dtrace_speculation_t *spec; 1862 dtrace_speculation_state_t current, new; 1863 dtrace_buffer_t *buf; 1864 1865 if (which == 0) 1866 return; 1867 1868 if (which > state->dts_nspeculations) { 1869 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 1870 return; 1871 } 1872 1873 spec = &state->dts_speculations[which - 1]; 1874 buf = &spec->dtsp_buffer[cpu]; 1875 1876 do { 1877 current = spec->dtsp_state; 1878 1879 switch (current) { 1880 case DTRACESPEC_INACTIVE: 1881 case DTRACESPEC_COMMITTINGMANY: 1882 case DTRACESPEC_COMMITTING: 1883 case DTRACESPEC_DISCARDING: 1884 return; 1885 1886 case DTRACESPEC_ACTIVE: 1887 case DTRACESPEC_ACTIVEMANY: 1888 new = DTRACESPEC_DISCARDING; 1889 break; 1890 1891 case DTRACESPEC_ACTIVEONE: 1892 if (buf->dtb_offset != 0) { 1893 new = DTRACESPEC_INACTIVE; 1894 } else { 1895 new = DTRACESPEC_DISCARDING; 1896 } 1897 break; 1898 1899 default: 1900 ASSERT(0); 1901 } 1902 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1903 current, new) != current); 1904 1905 buf->dtb_offset = 0; 1906 buf->dtb_drops = 0; 1907 } 1908 1909 /* 1910 * Note: not called from probe context. This function is called 1911 * asynchronously from cross call context to clean any speculations that are 1912 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 1913 * transitioned back to the INACTIVE state until all CPUs have cleaned the 1914 * speculation. 1915 */ 1916 static void 1917 dtrace_speculation_clean_here(dtrace_state_t *state) 1918 { 1919 dtrace_icookie_t cookie; 1920 processorid_t cpu = CPU->cpu_id; 1921 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 1922 dtrace_specid_t i; 1923 1924 cookie = dtrace_interrupt_disable(); 1925 1926 if (dest->dtb_tomax == NULL) { 1927 dtrace_interrupt_enable(cookie); 1928 return; 1929 } 1930 1931 for (i = 0; i < state->dts_nspeculations; i++) { 1932 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1933 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 1934 1935 if (src->dtb_tomax == NULL) 1936 continue; 1937 1938 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 1939 src->dtb_offset = 0; 1940 continue; 1941 } 1942 1943 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1944 continue; 1945 1946 if (src->dtb_offset == 0) 1947 continue; 1948 1949 dtrace_speculation_commit(state, cpu, i + 1); 1950 } 1951 1952 dtrace_interrupt_enable(cookie); 1953 } 1954 1955 /* 1956 * Note: not called from probe context. This function is called 1957 * asynchronously (and at a regular interval) to clean any speculations that 1958 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 1959 * is work to be done, it cross calls all CPUs to perform that work; 1960 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 1961 * INACTIVE state until they have been cleaned by all CPUs.
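 * The cleaning protocol is thus two-phase, roughly:
 *
 *	dtrace_speculation_clean():
 *		mark each COMMITTINGMANY/DISCARDING speculation as cleaning
 *		cross call dtrace_speculation_clean_here() on all CPUs
 *		cas32() each marked speculation back to INACTIVE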
1962 */ 1963 static void 1964 dtrace_speculation_clean(dtrace_state_t *state) 1965 { 1966 int work = 0, rv; 1967 dtrace_specid_t i; 1968 1969 for (i = 0; i < state->dts_nspeculations; i++) { 1970 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1971 1972 ASSERT(!spec->dtsp_cleaning); 1973 1974 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 1975 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1976 continue; 1977 1978 work++; 1979 spec->dtsp_cleaning = 1; 1980 } 1981 1982 if (!work) 1983 return; 1984 1985 dtrace_xcall(DTRACE_CPUALL, 1986 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 1987 1988 /* 1989 * We now know that all CPUs have committed or discarded their 1990 * speculation buffers, as appropriate. We can now set the state 1991 * to inactive. 1992 */ 1993 for (i = 0; i < state->dts_nspeculations; i++) { 1994 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1995 dtrace_speculation_state_t current, new; 1996 1997 if (!spec->dtsp_cleaning) 1998 continue; 1999 2000 current = spec->dtsp_state; 2001 ASSERT(current == DTRACESPEC_DISCARDING || 2002 current == DTRACESPEC_COMMITTINGMANY); 2003 2004 new = DTRACESPEC_INACTIVE; 2005 2006 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2007 ASSERT(rv == current); 2008 spec->dtsp_cleaning = 0; 2009 } 2010 } 2011 2012 /* 2013 * Called as part of a speculate() to get the speculative buffer associated 2014 * with a given speculation. Returns NULL if the specified speculation is not 2015 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2016 * the active CPU is not the specified CPU -- the speculation will be 2017 * atomically transitioned into the ACTIVEMANY state. 2018 */ 2019 static dtrace_buffer_t * 2020 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2021 dtrace_specid_t which) 2022 { 2023 dtrace_speculation_t *spec; 2024 dtrace_speculation_state_t current, new; 2025 dtrace_buffer_t *buf; 2026 2027 if (which == 0) 2028 return (NULL); 2029 2030 if (which > state->dts_nspeculations) { 2031 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2032 return (NULL); 2033 } 2034 2035 spec = &state->dts_speculations[which - 1]; 2036 buf = &spec->dtsp_buffer[cpuid]; 2037 2038 do { 2039 current = spec->dtsp_state; 2040 2041 switch (current) { 2042 case DTRACESPEC_INACTIVE: 2043 case DTRACESPEC_COMMITTINGMANY: 2044 case DTRACESPEC_DISCARDING: 2045 return (NULL); 2046 2047 case DTRACESPEC_COMMITTING: 2048 ASSERT(buf->dtb_offset == 0); 2049 return (NULL); 2050 2051 case DTRACESPEC_ACTIVEONE: 2052 /* 2053 * This speculation is currently active on one CPU. 2054 * Check the offset in the buffer; if it's non-zero, 2055 * that CPU must be us (and we leave the state alone). 2056 * If it's zero, assume that we're starting on a new 2057 * CPU -- and change the state to indicate that the 2058 * speculation is active on more than one CPU. 2059 */ 2060 if (buf->dtb_offset != 0) 2061 return (buf); 2062 2063 new = DTRACESPEC_ACTIVEMANY; 2064 break; 2065 2066 case DTRACESPEC_ACTIVEMANY: 2067 return (buf); 2068 2069 case DTRACESPEC_ACTIVE: 2070 new = DTRACESPEC_ACTIVEONE; 2071 break; 2072 2073 default: 2074 ASSERT(0); 2075 } 2076 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2077 current, new) != current); 2078 2079 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2080 return (buf); 2081 } 2082 2083 /* 2084 * This function implements the DIF emulator's variable lookups. The emulator 2085 * passes a reserved variable identifier and optional built-in array index. 
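 * Roughly speaking, the D built-in "execname" becomes a load of
 * DIF_VAR_EXECNAME with an unused index, while "args[2]" becomes a
 * load of DIF_VAR_ARGS with ndx set to 2.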
2086 */ 2087 static uint64_t 2088 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2089 uint64_t ndx) 2090 { 2091 /* 2092 * If we're accessing one of the uncached arguments, we'll turn this 2093 * into a reference in the args array. 2094 */ 2095 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2096 ndx = v - DIF_VAR_ARG0; 2097 v = DIF_VAR_ARGS; 2098 } 2099 2100 switch (v) { 2101 case DIF_VAR_ARGS: 2102 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2103 if (ndx >= sizeof (mstate->dtms_arg) / 2104 sizeof (mstate->dtms_arg[0])) { 2105 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2106 dtrace_provider_t *pv; 2107 uint64_t val; 2108 2109 pv = mstate->dtms_probe->dtpr_provider; 2110 if (pv->dtpv_pops.dtps_getargval != NULL) 2111 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2112 mstate->dtms_probe->dtpr_id, 2113 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2114 else 2115 val = dtrace_getarg(ndx, aframes); 2116 2117 /* 2118 * This is regrettably required to keep the compiler 2119 * from tail-optimizing the call to dtrace_getarg(). 2120 * The condition always evaluates to true, but the 2121 * compiler has no way of figuring that out a priori. 2122 * (None of this would be necessary if the compiler 2123 * could be relied upon to _always_ tail-optimize 2124 * the call to dtrace_getarg() -- but it can't.) 2125 */ 2126 if (mstate->dtms_probe != NULL) 2127 return (val); 2128 2129 ASSERT(0); 2130 } 2131 2132 return (mstate->dtms_arg[ndx]); 2133 2134 case DIF_VAR_UREGS: { 2135 klwp_t *lwp; 2136 2137 if (!dtrace_priv_proc(state)) 2138 return (0); 2139 2140 if ((lwp = curthread->t_lwp) == NULL) { 2141 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2142 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2143 return (0); 2144 } 2145 2146 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2147 } 2148 2149 case DIF_VAR_CURTHREAD: 2150 if (!dtrace_priv_kernel(state)) 2151 return (0); 2152 return ((uint64_t)(uintptr_t)curthread); 2153 2154 case DIF_VAR_TIMESTAMP: 2155 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2156 mstate->dtms_timestamp = dtrace_gethrtime(); 2157 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2158 } 2159 return (mstate->dtms_timestamp); 2160 2161 case DIF_VAR_VTIMESTAMP: 2162 ASSERT(dtrace_vtime_references != 0); 2163 return (curthread->t_dtrace_vtime); 2164 2165 case DIF_VAR_WALLTIMESTAMP: 2166 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2167 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2168 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2169 } 2170 return (mstate->dtms_walltimestamp); 2171 2172 case DIF_VAR_IPL: 2173 if (!dtrace_priv_kernel(state)) 2174 return (0); 2175 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2176 mstate->dtms_ipl = dtrace_getipl(); 2177 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2178 } 2179 return (mstate->dtms_ipl); 2180 2181 case DIF_VAR_EPID: 2182 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2183 return (mstate->dtms_epid); 2184 2185 case DIF_VAR_ID: 2186 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2187 return (mstate->dtms_probe->dtpr_id); 2188 2189 case DIF_VAR_STACKDEPTH: 2190 if (!dtrace_priv_kernel(state)) 2191 return (0); 2192 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2193 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2194 2195 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2196 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2197 } 2198 return (mstate->dtms_stackdepth); 2199 2200 case DIF_VAR_USTACKDEPTH: 2201 if 
(!dtrace_priv_proc(state)) 2202 return (0); 2203 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2204 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2205 mstate->dtms_ustackdepth = dtrace_getustackdepth(); 2206 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2207 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2208 } 2209 return (mstate->dtms_ustackdepth); 2210 2211 case DIF_VAR_CALLER: 2212 if (!dtrace_priv_kernel(state)) 2213 return (0); 2214 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2215 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2216 2217 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2218 /* 2219 * If this is an unanchored probe, we are 2220 * required to go through the slow path: 2221 * dtrace_caller() only guarantees correct 2222 * results for anchored probes. 2223 */ 2224 pc_t caller[2]; 2225 2226 dtrace_getpcstack(caller, 2, aframes, 2227 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2228 mstate->dtms_caller = caller[1]; 2229 } else if ((mstate->dtms_caller = 2230 dtrace_caller(aframes)) == -1) { 2231 /* 2232 * We have failed to do this the quick way; 2233 * we must resort to the slower approach of 2234 * calling dtrace_getpcstack(). 2235 */ 2236 pc_t caller; 2237 2238 dtrace_getpcstack(&caller, 1, aframes, NULL); 2239 mstate->dtms_caller = caller; 2240 } 2241 2242 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2243 } 2244 return (mstate->dtms_caller); 2245 2246 case DIF_VAR_UCALLER: 2247 if (!dtrace_priv_proc(state)) 2248 return (0); 2249 2250 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2251 uint64_t ustack[3]; 2252 2253 /* 2254 * dtrace_getupcstack() fills in the first uint64_t 2255 * with the current PID. The second uint64_t will 2256 * be the program counter at user-level. The third 2257 * uint64_t will contain the caller, which is what 2258 * we're after. 2259 */ 2260 ustack[2] = NULL; 2261 dtrace_getupcstack(ustack, 3); 2262 mstate->dtms_ucaller = ustack[2]; 2263 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2264 } 2265 2266 return (mstate->dtms_ucaller); 2267 2268 case DIF_VAR_PROBEPROV: 2269 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2270 return ((uint64_t)(uintptr_t) 2271 mstate->dtms_probe->dtpr_provider->dtpv_name); 2272 2273 case DIF_VAR_PROBEMOD: 2274 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2275 return ((uint64_t)(uintptr_t) 2276 mstate->dtms_probe->dtpr_mod); 2277 2278 case DIF_VAR_PROBEFUNC: 2279 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2280 return ((uint64_t)(uintptr_t) 2281 mstate->dtms_probe->dtpr_func); 2282 2283 case DIF_VAR_PROBENAME: 2284 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2285 return ((uint64_t)(uintptr_t) 2286 mstate->dtms_probe->dtpr_name); 2287 2288 case DIF_VAR_PID: 2289 if (!dtrace_priv_proc(state)) 2290 return (0); 2291 2292 /* 2293 * Note that we are assuming that an unanchored probe is 2294 * always due to a high-level interrupt. (And we're assuming 2295 * that there is only a single high-level interrupt.) 2296 */ 2297 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2298 return (pid0.pid_id); 2299 2300 /* 2301 * It is always safe to dereference one's own t_procp pointer: 2302 * it always points to a valid, allocated proc structure. 2303 * Further, it is always safe to dereference the p_pidp member 2304 * of one's own proc structure. (These are truisms because 2305 * threads and processes don't clean up their own state -- 2306 * they leave that task to whoever reaps them.)
2307 */ 2308 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2309 2310 case DIF_VAR_TID: 2311 /* 2312 * See comment in DIF_VAR_PID. 2313 */ 2314 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2315 return (0); 2316 2317 return ((uint64_t)curthread->t_tid); 2318 2319 case DIF_VAR_EXECNAME: 2320 if (!dtrace_priv_proc(state)) 2321 return (0); 2322 2323 /* 2324 * See comment in DIF_VAR_PID. 2325 */ 2326 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2327 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2328 2329 /* 2330 * It is always safe to dereference one's own t_procp pointer: 2331 * it always points to a valid, allocated proc structure. 2332 * (This is true because threads don't clean up their own 2333 * state -- they leave that task to whoever reaps them.) 2334 */ 2335 return ((uint64_t)(uintptr_t) 2336 curthread->t_procp->p_user.u_comm); 2337 2338 case DIF_VAR_ZONENAME: 2339 if (!dtrace_priv_proc(state)) 2340 return (0); 2341 2342 /* 2343 * See comment in DIF_VAR_PID. 2344 */ 2345 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2346 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2347 2348 /* 2349 * It is always safe to dereference one's own t_procp pointer: 2350 * it always points to a valid, allocated proc structure. 2351 * (This is true because threads don't clean up their own 2352 * state -- they leave that task to whoever reaps them.) 2353 */ 2354 return ((uint64_t)(uintptr_t) 2355 curthread->t_procp->p_zone->zone_name); 2356 2357 default: 2358 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2359 return (0); 2360 } 2361 } 2362 2363 /* 2364 * Emulate the execution of DIF subroutines invoked by the call opcode. 2365 * Notice that we don't bother validating the proper number of arguments or 2366 * their types in the tuple stack. This isn't needed because all argument 2367 * interpretation is safe because of our load safety -- the worst that can 2368 * happen is that a bogus program can obtain bogus results.
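 * For example, the D expression strlen(s) compiles (roughly) to a
 * pushtr of s followed by a call instruction carrying DIF_SUBR_STRLEN;
 * the emulation below then takes its argument from
 * tupregs[0].dttk_value.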
2369 */ 2370 static void 2371 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2372 dtrace_key_t *tupregs, int nargs, 2373 dtrace_mstate_t *mstate, dtrace_state_t *state) 2374 { 2375 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2376 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2377 2378 union { 2379 mutex_impl_t mi; 2380 uint64_t mx; 2381 } m; 2382 2383 union { 2384 krwlock_t ri; 2385 uintptr_t rw; 2386 } r; 2387 2388 switch (subr) { 2389 case DIF_SUBR_RAND: 2390 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2391 break; 2392 2393 case DIF_SUBR_MUTEX_OWNED: 2394 m.mx = dtrace_load64(tupregs[0].dttk_value); 2395 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2396 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2397 else 2398 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2399 break; 2400 2401 case DIF_SUBR_MUTEX_OWNER: 2402 m.mx = dtrace_load64(tupregs[0].dttk_value); 2403 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2404 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2405 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2406 else 2407 regs[rd] = 0; 2408 break; 2409 2410 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2411 m.mx = dtrace_load64(tupregs[0].dttk_value); 2412 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2413 break; 2414 2415 case DIF_SUBR_MUTEX_TYPE_SPIN: 2416 m.mx = dtrace_load64(tupregs[0].dttk_value); 2417 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2418 break; 2419 2420 case DIF_SUBR_RW_READ_HELD: { 2421 uintptr_t tmp; 2422 2423 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2424 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2425 break; 2426 } 2427 2428 case DIF_SUBR_RW_WRITE_HELD: 2429 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2430 regs[rd] = _RW_WRITE_HELD(&r.ri); 2431 break; 2432 2433 case DIF_SUBR_RW_ISWRITER: 2434 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2435 regs[rd] = _RW_ISWRITER(&r.ri); 2436 break; 2437 2438 case DIF_SUBR_BCOPY: { 2439 /* 2440 * We need to be sure that the destination is in the scratch 2441 * region -- no other region is allowed. 2442 */ 2443 uintptr_t src = tupregs[0].dttk_value; 2444 uintptr_t dest = tupregs[1].dttk_value; 2445 size_t size = tupregs[2].dttk_value; 2446 2447 if (!dtrace_inscratch(dest, size, mstate)) { 2448 *flags |= CPU_DTRACE_BADADDR; 2449 *illval = regs[rd]; 2450 break; 2451 } 2452 2453 dtrace_bcopy((void *)src, (void *)dest, size); 2454 break; 2455 } 2456 2457 case DIF_SUBR_ALLOCA: 2458 case DIF_SUBR_COPYIN: { 2459 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2460 uint64_t size = 2461 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2462 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2463 2464 /* 2465 * This action doesn't require any credential checks since 2466 * probes will not activate in user contexts to which the 2467 * enabling user does not have permissions. 
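 * (Note, too, that dest was rounded up to an 8-byte boundary above;
 * the rounding slop is folded into scratch_size so that the bounds
 * check below accounts for it.)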
2468 */ 2469 if (mstate->dtms_scratch_ptr + scratch_size > 2470 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2471 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2472 regs[rd] = NULL; 2473 break; 2474 } 2475 2476 if (subr == DIF_SUBR_COPYIN) { 2477 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2478 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2479 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2480 } 2481 2482 mstate->dtms_scratch_ptr += scratch_size; 2483 regs[rd] = dest; 2484 break; 2485 } 2486 2487 case DIF_SUBR_COPYINTO: { 2488 uint64_t size = tupregs[1].dttk_value; 2489 uintptr_t dest = tupregs[2].dttk_value; 2490 2491 /* 2492 * This action doesn't require any credential checks since 2493 * probes will not activate in user contexts to which the 2494 * enabling user does not have permissions. 2495 */ 2496 if (!dtrace_inscratch(dest, size, mstate)) { 2497 *flags |= CPU_DTRACE_BADADDR; 2498 *illval = regs[rd]; 2499 break; 2500 } 2501 2502 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2503 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2504 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2505 break; 2506 } 2507 2508 case DIF_SUBR_COPYINSTR: { 2509 uintptr_t dest = mstate->dtms_scratch_ptr; 2510 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2511 2512 if (nargs > 1 && tupregs[1].dttk_value < size) 2513 size = tupregs[1].dttk_value + 1; 2514 2515 /* 2516 * This action doesn't require any credential checks since 2517 * probes will not activate in user contexts to which the 2518 * enabling user does not have permissions. 2519 */ 2520 if (mstate->dtms_scratch_ptr + size > 2521 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2522 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2523 regs[rd] = NULL; 2524 break; 2525 } 2526 2527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2528 dtrace_copyinstr(tupregs[0].dttk_value, dest, size); 2529 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2530 2531 ((char *)dest)[size - 1] = '\0'; 2532 mstate->dtms_scratch_ptr += size; 2533 regs[rd] = dest; 2534 break; 2535 } 2536 2537 case DIF_SUBR_MSGSIZE: 2538 case DIF_SUBR_MSGDSIZE: { 2539 uintptr_t baddr = tupregs[0].dttk_value, daddr; 2540 uintptr_t wptr, rptr; 2541 size_t count = 0; 2542 int cont = 0; 2543 2544 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2545 wptr = dtrace_loadptr(baddr + 2546 offsetof(mblk_t, b_wptr)); 2547 2548 rptr = dtrace_loadptr(baddr + 2549 offsetof(mblk_t, b_rptr)); 2550 2551 if (wptr < rptr) { 2552 *flags |= CPU_DTRACE_BADADDR; 2553 *illval = tupregs[0].dttk_value; 2554 break; 2555 } 2556 2557 daddr = dtrace_loadptr(baddr + 2558 offsetof(mblk_t, b_datap)); 2559 2560 baddr = dtrace_loadptr(baddr + 2561 offsetof(mblk_t, b_cont)); 2562 2563 /* 2564 * We want to protect against denial-of-service here, 2565 * so we're only going to search the list for 2566 * dtrace_msgdsize_max mblks.
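 * The chain being walked has the usual STREAMS shape:
 *
 *	baddr -> mblk [b_rptr .. b_wptr] -- b_cont --> mblk [...] --> ...
 *
 * with each mblk contributing (b_wptr - b_rptr) bytes to the total.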
2567 */ 2568 if (cont++ > dtrace_msgdsize_max) { 2569 *flags |= CPU_DTRACE_ILLOP; 2570 break; 2571 } 2572 2573 if (subr == DIF_SUBR_MSGDSIZE) { 2574 if (dtrace_load8(daddr + 2575 offsetof(dblk_t, db_type)) != M_DATA) 2576 continue; 2577 } 2578 2579 count += wptr - rptr; 2580 } 2581 2582 if (!(*flags & CPU_DTRACE_FAULT)) 2583 regs[rd] = count; 2584 2585 break; 2586 } 2587 2588 case DIF_SUBR_PROGENYOF: { 2589 pid_t pid = tupregs[0].dttk_value; 2590 proc_t *p; 2591 int rval = 0; 2592 2593 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2594 2595 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 2596 if (p->p_pidp->pid_id == pid) { 2597 rval = 1; 2598 break; 2599 } 2600 } 2601 2602 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2603 2604 regs[rd] = rval; 2605 break; 2606 } 2607 2608 case DIF_SUBR_SPECULATION: 2609 regs[rd] = dtrace_speculation(state); 2610 break; 2611 2612 case DIF_SUBR_COPYOUT: { 2613 uintptr_t kaddr = tupregs[0].dttk_value; 2614 uintptr_t uaddr = tupregs[1].dttk_value; 2615 uint64_t size = tupregs[2].dttk_value; 2616 2617 if (!dtrace_destructive_disallow && 2618 dtrace_priv_proc_control(state) && 2619 !dtrace_istoxic(kaddr, size)) { 2620 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2621 dtrace_copyout(kaddr, uaddr, size); 2622 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2623 } 2624 break; 2625 } 2626 2627 case DIF_SUBR_COPYOUTSTR: { 2628 uintptr_t kaddr = tupregs[0].dttk_value; 2629 uintptr_t uaddr = tupregs[1].dttk_value; 2630 uint64_t size = tupregs[2].dttk_value; 2631 2632 if (!dtrace_destructive_disallow && 2633 dtrace_priv_proc_control(state) && 2634 !dtrace_istoxic(kaddr, size)) { 2635 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2636 dtrace_copyoutstr(kaddr, uaddr, size); 2637 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2638 } 2639 break; 2640 } 2641 2642 case DIF_SUBR_STRLEN: 2643 regs[rd] = dtrace_strlen((char *)(uintptr_t) 2644 tupregs[0].dttk_value, 2645 state->dts_options[DTRACEOPT_STRSIZE]); 2646 break; 2647 2648 case DIF_SUBR_STRCHR: 2649 case DIF_SUBR_STRRCHR: { 2650 /* 2651 * We're going to iterate over the string looking for the 2652 * specified character. We will iterate until we have reached 2653 * the string length or we have found the character. If this 2654 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 2655 * of the specified character instead of the first. 2656 */ 2657 uintptr_t addr = tupregs[0].dttk_value; 2658 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 2659 char c, target = (char)tupregs[1].dttk_value; 2660 2661 for (regs[rd] = NULL; addr < limit; addr++) { 2662 if ((c = dtrace_load8(addr)) == target) { 2663 regs[rd] = addr; 2664 2665 if (subr == DIF_SUBR_STRCHR) 2666 break; 2667 } 2668 2669 if (c == '\0') 2670 break; 2671 } 2672 2673 break; 2674 } 2675 2676 case DIF_SUBR_STRSTR: 2677 case DIF_SUBR_INDEX: 2678 case DIF_SUBR_RINDEX: { 2679 /* 2680 * We're going to iterate over the string looking for the 2681 * specified string. We will iterate until we have reached 2682 * the string length or we have found the string. (Yes, this 2683 * is done in the most naive way possible -- but considering 2684 * that the string we're searching for is likely to be 2685 * relatively short, the complexity of Rabin-Karp or similar 2686 * hardly seems merited.) 
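 * (The search below is therefore O(len * sublen) in the worst case --
 * but both factors are bounded by the strsize option, so even the
 * worst case is bounded.)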
2687 */ 2688 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 2689 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 2690 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2691 size_t len = dtrace_strlen(addr, size); 2692 size_t sublen = dtrace_strlen(substr, size); 2693 char *limit = addr + len, *orig = addr; 2694 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 2695 int inc = 1; 2696 2697 regs[rd] = notfound; 2698 2699 /* 2700 * strstr() and index()/rindex() have similar semantics if 2701 * both strings are the empty string: strstr() returns a 2702 * pointer to the (empty) string, and index() and rindex() 2703 * both return index 0 (regardless of any position argument). 2704 */ 2705 if (sublen == 0 && len == 0) { 2706 if (subr == DIF_SUBR_STRSTR) 2707 regs[rd] = (uintptr_t)addr; 2708 else 2709 regs[rd] = 0; 2710 break; 2711 } 2712 2713 if (subr != DIF_SUBR_STRSTR) { 2714 if (subr == DIF_SUBR_RINDEX) { 2715 limit = orig - 1; 2716 addr += len; 2717 inc = -1; 2718 } 2719 2720 /* 2721 * Both index() and rindex() take an optional position 2722 * argument that denotes the starting position. 2723 */ 2724 if (nargs == 3) { 2725 int64_t pos = (int64_t)tupregs[2].dttk_value; 2726 2727 /* 2728 * If the position argument to index() is 2729 * negative, Perl implicitly clamps it at 2730 * zero. This semantic is a little surprising 2731 * given the special meaning of negative 2732 * positions to similar Perl functions like 2733 * substr(), but it appears to reflect a 2734 * notion that index() can start from a 2735 * negative index and increment its way up to 2736 * the string. Given this notion, Perl's 2737 * rindex() is at least self-consistent in 2738 * that it implicitly clamps positions greater 2739 * than the string length to be the string 2740 * length. Where Perl completely loses 2741 * coherence, however, is when the specified 2742 * substring is the empty string (""). In 2743 * this case, even if the position is 2744 * negative, rindex() returns 0 -- and even if 2745 * the position is greater than the length, 2746 * index() returns the string length. These 2747 * semantics violate the notion that index() 2748 * should never return a value less than the 2749 * specified position and that rindex() should 2750 * never return a value greater than the 2751 * specified position. (One assumes that 2752 * these semantics are artifacts of Perl's 2753 * implementation and not the results of 2754 * deliberate design -- it beggars belief that 2755 * even Larry Wall could desire such oddness.) 2756 * While in the abstract one would wish for 2757 * consistent position semantics across 2758 * substr(), index() and rindex() -- or at the 2759 * very least self-consistent position 2760 * semantics for index() and rindex() -- we 2761 * instead opt to keep with the extant Perl 2762 * semantics, in all their broken glory. (Do 2763 * we have more desire to maintain Perl's 2764 * semantics than Perl does? Probably.) 
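 * Concretely, and matching both Perl and the code below:
 *
 *	index("foo", "", 10)	returns 3 (clamped to the length)
 *	rindex("foo", "", -5)	returns 0 (clamped to zero)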
2765 */ 2766 if (subr == DIF_SUBR_RINDEX) { 2767 if (pos < 0) { 2768 if (sublen == 0) 2769 regs[rd] = 0; 2770 break; 2771 } 2772 2773 if (pos > len) 2774 pos = len; 2775 } else { 2776 if (pos < 0) 2777 pos = 0; 2778 2779 if (pos >= len) { 2780 if (sublen == 0) 2781 regs[rd] = len; 2782 break; 2783 } 2784 } 2785 2786 addr = orig + pos; 2787 } 2788 } 2789 2790 for (regs[rd] = notfound; addr != limit; addr += inc) { 2791 if (dtrace_strncmp(addr, substr, sublen) == 0) { 2792 if (subr != DIF_SUBR_STRSTR) { 2793 /* 2794 * As D index() and rindex() are 2795 * modeled on Perl (and not on awk), 2796 * we return a zero-based (and not a 2797 * one-based) index. (For you Perl 2798 * weenies: no, we're not going to add 2799 * $[ -- and shouldn't you be at a con 2800 * or something?) 2801 */ 2802 regs[rd] = (uintptr_t)(addr - orig); 2803 break; 2804 } 2805 2806 ASSERT(subr == DIF_SUBR_STRSTR); 2807 regs[rd] = (uintptr_t)addr; 2808 break; 2809 } 2810 } 2811 2812 break; 2813 } 2814 2815 case DIF_SUBR_STRTOK: { 2816 uintptr_t addr = tupregs[0].dttk_value; 2817 uintptr_t tokaddr = tupregs[1].dttk_value; 2818 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2819 uintptr_t limit, toklimit = tokaddr + size; 2820 uint8_t c, tokmap[32]; /* 256 / 8 */ 2821 char *dest = (char *)mstate->dtms_scratch_ptr; 2822 int i; 2823 2824 if (mstate->dtms_scratch_ptr + size > 2825 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2826 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2827 regs[rd] = NULL; 2828 break; 2829 } 2830 2831 if (addr == NULL) { 2832 /* 2833 * If the address specified is NULL, we use our saved 2834 * strtok pointer from the mstate. Note that this 2835 * means that the saved strtok pointer is _only_ 2836 * valid within multiple enablings of the same probe -- 2837 * it behaves like an implicit clause-local variable. 2838 */ 2839 addr = mstate->dtms_strtok; 2840 } 2841 2842 /* 2843 * First, zero the token map, and then process the token 2844 * string -- setting a bit in the map for every character 2845 * found in the token string. 2846 */ 2847 for (i = 0; i < sizeof (tokmap); i++) 2848 tokmap[i] = 0; 2849 2850 for (; tokaddr < toklimit; tokaddr++) { 2851 if ((c = dtrace_load8(tokaddr)) == '\0') 2852 break; 2853 2854 ASSERT((c >> 3) < sizeof (tokmap)); 2855 tokmap[c >> 3] |= (1 << (c & 0x7)); 2856 } 2857 2858 for (limit = addr + size; addr < limit; addr++) { 2859 /* 2860 * We're looking for a character that is _not_ contained 2861 * in the token string. 2862 */ 2863 if ((c = dtrace_load8(addr)) == '\0') 2864 break; 2865 2866 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 2867 break; 2868 } 2869 2870 if (c == '\0') { 2871 /* 2872 * We reached the end of the string without finding 2873 * any character that was not in the token string. 2874 * We return NULL in this case, and we set the saved 2875 * address to NULL as well. 2876 */ 2877 regs[rd] = NULL; 2878 mstate->dtms_strtok = NULL; 2879 break; 2880 } 2881 2882 /* 2883 * From here on, we're copying into the destination string. 
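 * (To make this concrete: tokenizing "  foo bar" with a token string
 * of " ", the loop above has already skipped the leading blanks; the
 * loop below copies "foo" into scratch, and the saved strtok pointer
 * is left on the blank that follows it.)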
2884 */ 2885 for (i = 0; addr < limit && i < size - 1; addr++) { 2886 if ((c = dtrace_load8(addr)) == '\0') 2887 break; 2888 2889 if (tokmap[c >> 3] & (1 << (c & 0x7))) 2890 break; 2891 2892 ASSERT(i < size); 2893 dest[i++] = c; 2894 } 2895 2896 ASSERT(i < size); 2897 dest[i] = '\0'; 2898 regs[rd] = (uintptr_t)dest; 2899 mstate->dtms_scratch_ptr += size; 2900 mstate->dtms_strtok = addr; 2901 break; 2902 } 2903 2904 case DIF_SUBR_SUBSTR: { 2905 uintptr_t s = tupregs[0].dttk_value; 2906 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2907 char *d = (char *)mstate->dtms_scratch_ptr; 2908 int64_t index = (int64_t)tupregs[1].dttk_value; 2909 int64_t remaining = (int64_t)tupregs[2].dttk_value; 2910 size_t len = dtrace_strlen((char *)s, size); 2911 int64_t i = 0; 2912 2913 if (nargs <= 2) 2914 remaining = (int64_t)size; 2915 2916 if (mstate->dtms_scratch_ptr + size > 2917 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2918 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2919 regs[rd] = NULL; 2920 break; 2921 } 2922 2923 if (index < 0) { 2924 index += len; 2925 2926 if (index < 0 && index + remaining > 0) { 2927 remaining += index; 2928 index = 0; 2929 } 2930 } 2931 2932 if (index >= len || index < 0) 2933 index = len; 2934 2935 for (d[0] = '\0'; remaining > 0; remaining--) { 2936 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 2937 break; 2938 2939 if (i == size) { 2940 d[i - 1] = '\0'; 2941 break; 2942 } 2943 } 2944 2945 mstate->dtms_scratch_ptr += size; 2946 regs[rd] = (uintptr_t)d; 2947 break; 2948 } 2949 2950 case DIF_SUBR_GETMAJOR: 2951 #ifdef _LP64 2952 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 2953 #else 2954 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 2955 #endif 2956 break; 2957 2958 case DIF_SUBR_GETMINOR: 2959 #ifdef _LP64 2960 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 2961 #else 2962 regs[rd] = tupregs[0].dttk_value & MAXMIN; 2963 #endif 2964 break; 2965 2966 case DIF_SUBR_DDI_PATHNAME: { 2967 /* 2968 * This one is a galactic mess. We are going to roughly 2969 * emulate ddi_pathname(), but it's made more complicated 2970 * by the fact that we (a) want to include the minor name and 2971 * (b) must proceed iteratively instead of recursively. 2972 */ 2973 uintptr_t dest = mstate->dtms_scratch_ptr; 2974 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2975 char *start = (char *)dest, *end = start + size - 1; 2976 uintptr_t daddr = tupregs[0].dttk_value; 2977 int64_t minor = (int64_t)tupregs[1].dttk_value; 2978 char *s; 2979 int i, len, depth = 0; 2980 2981 if (size == 0 || mstate->dtms_scratch_ptr + size > 2982 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2983 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2984 regs[rd] = NULL; 2985 break; 2986 } 2987 2988 *end = '\0'; 2989 2990 /* 2991 * We want to have a name for the minor. In order to do this, 2992 * we need to walk the minor list from the devinfo. We want 2993 * to be sure that we don't infinitely walk a circular list, 2994 * so we check for circularity by sending a scout pointer 2995 * ahead two elements for every element that we iterate over; 2996 * if the list is circular, these will ultimately point to the 2997 * same element. You may recognize this little trick as the 2998 * answer to a stupid interview question -- one that always 2999 * seems to be asked by those who had to have it laboriously 3000 * explained to them, and who can't even concisely describe 3001 * the conditions under which one would be forced to resort to 3002 * this technique. 
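 * (Aficionados of such questions will recognize the scout as the
 * tortoise-and-hare cycle detection usually attributed to Floyd.)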
Needless to say, those conditions are 3003 * found here -- and probably only here. Is this the only 3004 * use of this infamous trick in shipping, production code? 3005 * If it isn't, it probably should be... 3006 */ 3007 if (minor != -1) { 3008 uintptr_t maddr = dtrace_loadptr(daddr + 3009 offsetof(struct dev_info, devi_minor)); 3010 3011 uintptr_t next = offsetof(struct ddi_minor_data, next); 3012 uintptr_t name = offsetof(struct ddi_minor_data, 3013 d_minor) + offsetof(struct ddi_minor, name); 3014 uintptr_t dev = offsetof(struct ddi_minor_data, 3015 d_minor) + offsetof(struct ddi_minor, dev); 3016 uintptr_t scout; 3017 3018 if (maddr != NULL) 3019 scout = dtrace_loadptr(maddr + next); 3020 3021 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3022 uint64_t m; 3023 #ifdef _LP64 3024 m = dtrace_load64(maddr + dev) & MAXMIN64; 3025 #else 3026 m = dtrace_load32(maddr + dev) & MAXMIN; 3027 #endif 3028 if (m != minor) { 3029 maddr = dtrace_loadptr(maddr + next); 3030 3031 if (scout == NULL) 3032 continue; 3033 3034 scout = dtrace_loadptr(scout + next); 3035 3036 if (scout == NULL) 3037 continue; 3038 3039 scout = dtrace_loadptr(scout + next); 3040 3041 if (scout == NULL) 3042 continue; 3043 3044 if (scout == maddr) { 3045 *flags |= CPU_DTRACE_ILLOP; 3046 break; 3047 } 3048 3049 continue; 3050 } 3051 3052 /* 3053 * We have the minor data. Now we need to 3054 * copy the minor's name into the end of the 3055 * pathname. 3056 */ 3057 s = (char *)dtrace_loadptr(maddr + name); 3058 len = dtrace_strlen(s, size); 3059 3060 if (*flags & CPU_DTRACE_FAULT) 3061 break; 3062 3063 if (len != 0) { 3064 if ((end -= (len + 1)) < start) 3065 break; 3066 3067 *end = ':'; 3068 } 3069 3070 for (i = 1; i <= len; i++) 3071 end[i] = dtrace_load8((uintptr_t)s++); 3072 break; 3073 } 3074 } 3075 3076 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3077 ddi_node_state_t devi_state; 3078 3079 devi_state = dtrace_load32(daddr + 3080 offsetof(struct dev_info, devi_node_state)); 3081 3082 if (*flags & CPU_DTRACE_FAULT) 3083 break; 3084 3085 if (devi_state >= DS_INITIALIZED) { 3086 s = (char *)dtrace_loadptr(daddr + 3087 offsetof(struct dev_info, devi_addr)); 3088 len = dtrace_strlen(s, size); 3089 3090 if (*flags & CPU_DTRACE_FAULT) 3091 break; 3092 3093 if (len != 0) { 3094 if ((end -= (len + 1)) < start) 3095 break; 3096 3097 *end = '@'; 3098 } 3099 3100 for (i = 1; i <= len; i++) 3101 end[i] = dtrace_load8((uintptr_t)s++); 3102 } 3103 3104 /* 3105 * Now for the node name... 3106 */ 3107 s = (char *)dtrace_loadptr(daddr + 3108 offsetof(struct dev_info, devi_node_name)); 3109 3110 daddr = dtrace_loadptr(daddr + 3111 offsetof(struct dev_info, devi_parent)); 3112 3113 /* 3114 * If our parent is NULL (that is, if we're the root 3115 * node), we're going to use the special path 3116 * "devices".
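 * The assembled result is a path of the (hypothetical) form
 *
 *	/devices/pci@1f,0/pci@1/SUNW,isptwo@4:devctl
 *
 * built up back-to-front in scratch space.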
3117 */ 3118 if (daddr == NULL) 3119 s = "devices"; 3120 3121 len = dtrace_strlen(s, size); 3122 if (*flags & CPU_DTRACE_FAULT) 3123 break; 3124 3125 if ((end -= (len + 1)) < start) 3126 break; 3127 3128 for (i = 1; i <= len; i++) 3129 end[i] = dtrace_load8((uintptr_t)s++); 3130 *end = '/'; 3131 3132 if (depth++ > dtrace_devdepth_max) { 3133 *flags |= CPU_DTRACE_ILLOP; 3134 break; 3135 } 3136 } 3137 3138 if (end < start) 3139 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3140 3141 if (daddr == NULL) { 3142 regs[rd] = (uintptr_t)end; 3143 mstate->dtms_scratch_ptr += size; 3144 } 3145 3146 break; 3147 } 3148 3149 case DIF_SUBR_STRJOIN: { 3150 char *d = (char *)mstate->dtms_scratch_ptr; 3151 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3152 uintptr_t s1 = tupregs[0].dttk_value; 3153 uintptr_t s2 = tupregs[1].dttk_value; 3154 int i = 0; 3155 3156 if (mstate->dtms_scratch_ptr + size > 3157 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3158 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3159 regs[rd] = NULL; 3160 break; 3161 } 3162 3163 for (;;) { 3164 if (i >= size) { 3165 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3166 regs[rd] = NULL; 3167 break; 3168 } 3169 3170 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3171 i--; 3172 break; 3173 } 3174 } 3175 3176 for (;;) { 3177 if (i >= size) { 3178 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3179 regs[rd] = NULL; 3180 break; 3181 } 3182 3183 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3184 break; 3185 } 3186 3187 if (i < size) { 3188 mstate->dtms_scratch_ptr += i; 3189 regs[rd] = (uintptr_t)d; 3190 } 3191 3192 break; 3193 } 3194 3195 case DIF_SUBR_LLTOSTR: { 3196 int64_t i = (int64_t)tupregs[0].dttk_value; 3197 int64_t val = i < 0 ? i * -1 : i; 3198 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3199 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3200 3201 if (mstate->dtms_scratch_ptr + size > 3202 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3203 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3204 regs[rd] = NULL; 3205 break; 3206 } 3207 3208 for (*end-- = '\0'; val; val /= 10) 3209 *end-- = '0' + (val % 10); 3210 3211 if (i == 0) 3212 *end-- = '0'; 3213 3214 if (i < 0) 3215 *end-- = '-'; 3216 3217 regs[rd] = (uintptr_t)end + 1; 3218 mstate->dtms_scratch_ptr += size; 3219 break; 3220 } 3221 3222 case DIF_SUBR_DIRNAME: 3223 case DIF_SUBR_BASENAME: { 3224 char *dest = (char *)mstate->dtms_scratch_ptr; 3225 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3226 uintptr_t src = tupregs[0].dttk_value; 3227 int i, j, len = dtrace_strlen((char *)src, size); 3228 int lastbase = -1, firstbase = -1, lastdir = -1; 3229 int start, end; 3230 3231 if (mstate->dtms_scratch_ptr + size > 3232 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3233 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3234 regs[rd] = NULL; 3235 break; 3236 } 3237 3238 /* 3239 * The basename and dirname for a zero-length string are 3240 * defined to be "." 3241 */ 3242 if (len == 0) { 3243 len = 1; 3244 src = (uintptr_t)"."; 3245 } 3246 3247 /* 3248 * Start from the back of the string, moving back toward the 3249 * front until we see a character that isn't a slash. That 3250 * character is the last character in the basename. 3251 */ 3252 for (i = len - 1; i >= 0; i--) { 3253 if (dtrace_load8(src + i) != '/') 3254 break; 3255 } 3256 3257 if (i >= 0) 3258 lastbase = i; 3259 3260 /* 3261 * Starting from the last character in the basename, move 3262 * towards the front until we find a slash.
The character 3263 * that we processed immediately before that is the first 3264 * character in the basename. 3265 */ 3266 for (; i >= 0; i--) { 3267 if (dtrace_load8(src + i) == '/') 3268 break; 3269 } 3270 3271 if (i >= 0) 3272 firstbase = i + 1; 3273 3274 /* 3275 * Now keep going until we find a non-slash character. That 3276 * character is the last character in the dirname. 3277 */ 3278 for (; i >= 0; i--) { 3279 if (dtrace_load8(src + i) != '/') 3280 break; 3281 } 3282 3283 if (i >= 0) 3284 lastdir = i; 3285 3286 ASSERT(!(lastbase == -1 && firstbase != -1)); 3287 ASSERT(!(firstbase == -1 && lastdir != -1)); 3288 3289 if (lastbase == -1) { 3290 /* 3291 * We didn't find a non-slash character. We know that 3292 * the length is non-zero, so the whole string must be 3293 * slashes. In either the dirname or the basename 3294 * case, we return '/'. 3295 */ 3296 ASSERT(firstbase == -1); 3297 firstbase = lastbase = lastdir = 0; 3298 } 3299 3300 if (firstbase == -1) { 3301 /* 3302 * The entire string consists only of a basename 3303 * component. If we're looking for dirname, we need 3304 * to change our string to be just "."; if we're 3305 * looking for a basename, we'll just set the first 3306 * character of the basename to be 0. 3307 */ 3308 if (subr == DIF_SUBR_DIRNAME) { 3309 ASSERT(lastdir == -1); 3310 src = (uintptr_t)"."; 3311 lastdir = 0; 3312 } else { 3313 firstbase = 0; 3314 } 3315 } 3316 3317 if (subr == DIF_SUBR_DIRNAME) { 3318 if (lastdir == -1) { 3319 /* 3320 * We know that we have a slash in the name -- 3321 * or lastdir would be set to 0, above. And 3322 * because lastdir is -1, we know that this 3323 * slash must be the first character. (That 3324 * is, the full string must be of the form 3325 * "/basename".) In this case, the last 3326 * character of the directory name is 0. 3327 */ 3328 lastdir = 0; 3329 } 3330 3331 start = 0; 3332 end = lastdir; 3333 } else { 3334 ASSERT(subr == DIF_SUBR_BASENAME); 3335 ASSERT(firstbase != -1 && lastbase != -1); 3336 start = firstbase; 3337 end = lastbase; 3338 } 3339 3340 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3341 dest[j] = dtrace_load8(src + i); 3342 3343 dest[j] = '\0'; 3344 regs[rd] = (uintptr_t)dest; 3345 mstate->dtms_scratch_ptr += size; 3346 break; 3347 } 3348 3349 case DIF_SUBR_CLEANPATH: { 3350 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3351 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3352 uintptr_t src = tupregs[0].dttk_value; 3353 int i = 0, j = 0; 3354 3355 if (mstate->dtms_scratch_ptr + size > 3356 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3357 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3358 regs[rd] = NULL; 3359 break; 3360 } 3361 3362 /* 3363 * Move forward, loading each character. 3364 */ 3365 do { 3366 c = dtrace_load8(src + i++); 3367 next: 3368 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3369 break; 3370 3371 if (c != '/') { 3372 dest[j++] = c; 3373 continue; 3374 } 3375 3376 c = dtrace_load8(src + i++); 3377 3378 if (c == '/') { 3379 /* 3380 * We have two slashes -- we can just advance 3381 * to the next character. 3382 */ 3383 goto next; 3384 } 3385 3386 if (c != '.') { 3387 /* 3388 * This is not "." and it's not ".." -- we can 3389 * just store the "/" and this character and 3390 * drive on. 3391 */ 3392 dest[j++] = '/'; 3393 dest[j++] = c; 3394 continue; 3395 } 3396 3397 c = dtrace_load8(src + i++); 3398 3399 if (c == '/') { 3400 /* 3401 * This is a "/./" component. 
We're not going 3402 * to store anything in the destination buffer; 3403 * we're just going to go to the next component. 3404 */ 3405 goto next; 3406 } 3407 3408 if (c != '.') { 3409 /* 3410 * This is not ".." -- we can just store the 3411 * "/." and this character and continue 3412 * processing. 3413 */ 3414 dest[j++] = '/'; 3415 dest[j++] = '.'; 3416 dest[j++] = c; 3417 continue; 3418 } 3419 3420 c = dtrace_load8(src + i++); 3421 3422 if (c != '/' && c != '\0') { 3423 /* 3424 * This is not ".." -- it's "..[mumble]". 3425 * We'll store the "/.." and this character 3426 * and continue processing. 3427 */ 3428 dest[j++] = '/'; 3429 dest[j++] = '.'; 3430 dest[j++] = '.'; 3431 dest[j++] = c; 3432 continue; 3433 } 3434 3435 /* 3436 * This is "/../" or "/..\0". We need to back up 3437 * our destination pointer until we find a "/". 3438 */ 3439 i--; 3440 while (j != 0 && dest[--j] != '/') 3441 continue; 3442 3443 if (c == '\0') 3444 dest[++j] = '/'; 3445 } while (c != '\0'); 3446 3447 dest[j] = '\0'; 3448 regs[rd] = (uintptr_t)dest; 3449 mstate->dtms_scratch_ptr += size; 3450 break; 3451 } 3452 } 3453 } 3454 3455 /* 3456 * Emulate the execution of DTrace IR instructions specified by the given 3457 * DIF object. This function is deliberately void of assertions as all of 3458 * the necessary checks are handled by a call to dtrace_difo_validate(). 3459 */ 3460 static uint64_t 3461 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 3462 dtrace_vstate_t *vstate, dtrace_state_t *state) 3463 { 3464 const dif_instr_t *text = difo->dtdo_buf; 3465 const uint_t textlen = difo->dtdo_len; 3466 const char *strtab = difo->dtdo_strtab; 3467 const uint64_t *inttab = difo->dtdo_inttab; 3468 3469 uint64_t rval = 0; 3470 dtrace_statvar_t *svar; 3471 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 3472 dtrace_difv_t *v; 3473 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3474 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3475 3476 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 3477 uint64_t regs[DIF_DIR_NREGS]; 3478 uint64_t *tmp; 3479 3480 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 3481 int64_t cc_r; 3482 uint_t pc = 0, id, opc; 3483 uint8_t ttop = 0; 3484 dif_instr_t instr; 3485 uint_t r1, r2, rd; 3486 3487 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 3488 3489 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 3490 opc = pc; 3491 3492 instr = text[pc++]; 3493 r1 = DIF_INSTR_R1(instr); 3494 r2 = DIF_INSTR_R2(instr); 3495 rd = DIF_INSTR_RD(instr); 3496 3497 switch (DIF_INSTR_OP(instr)) { 3498 case DIF_OP_OR: 3499 regs[rd] = regs[r1] | regs[r2]; 3500 break; 3501 case DIF_OP_XOR: 3502 regs[rd] = regs[r1] ^ regs[r2]; 3503 break; 3504 case DIF_OP_AND: 3505 regs[rd] = regs[r1] & regs[r2]; 3506 break; 3507 case DIF_OP_SLL: 3508 regs[rd] = regs[r1] << regs[r2]; 3509 break; 3510 case DIF_OP_SRL: 3511 regs[rd] = regs[r1] >> regs[r2]; 3512 break; 3513 case DIF_OP_SUB: 3514 regs[rd] = regs[r1] - regs[r2]; 3515 break; 3516 case DIF_OP_ADD: 3517 regs[rd] = regs[r1] + regs[r2]; 3518 break; 3519 case DIF_OP_MUL: 3520 regs[rd] = regs[r1] * regs[r2]; 3521 break; 3522 case DIF_OP_SDIV: 3523 if (regs[r2] == 0) { 3524 regs[rd] = 0; 3525 *flags |= CPU_DTRACE_DIVZERO; 3526 } else { 3527 regs[rd] = (int64_t)regs[r1] / 3528 (int64_t)regs[r2]; 3529 } 3530 break; 3531 3532 case DIF_OP_UDIV: 3533 if (regs[r2] == 0) { 3534 regs[rd] = 0; 3535 *flags |= CPU_DTRACE_DIVZERO; 3536 } else { 3537 regs[rd] = regs[r1] / regs[r2]; 3538 } 3539 break; 3540 
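		/*
		 * The remainder cases below mirror the division cases above:
		 * the signed variants differ from the unsigned ones only in
		 * their casts, and all four map division by zero to a zero
		 * result plus CPU_DTRACE_DIVZERO rather than to a fault.
		 */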
3541 case DIF_OP_SREM: 3542 if (regs[r2] == 0) { 3543 regs[rd] = 0; 3544 *flags |= CPU_DTRACE_DIVZERO; 3545 } else { 3546 regs[rd] = (int64_t)regs[r1] % 3547 (int64_t)regs[r2]; 3548 } 3549 break; 3550 3551 case DIF_OP_UREM: 3552 if (regs[r2] == 0) { 3553 regs[rd] = 0; 3554 *flags |= CPU_DTRACE_DIVZERO; 3555 } else { 3556 regs[rd] = regs[r1] % regs[r2]; 3557 } 3558 break; 3559 3560 case DIF_OP_NOT: 3561 regs[rd] = ~regs[r1]; 3562 break; 3563 case DIF_OP_MOV: 3564 regs[rd] = regs[r1]; 3565 break; 3566 case DIF_OP_CMP: 3567 cc_r = regs[r1] - regs[r2]; 3568 cc_n = cc_r < 0; 3569 cc_z = cc_r == 0; 3570 cc_v = 0; 3571 cc_c = regs[r1] < regs[r2]; 3572 break; 3573 case DIF_OP_TST: 3574 cc_n = cc_v = cc_c = 0; 3575 cc_z = regs[r1] == 0; 3576 break; 3577 case DIF_OP_BA: 3578 pc = DIF_INSTR_LABEL(instr); 3579 break; 3580 case DIF_OP_BE: 3581 if (cc_z) 3582 pc = DIF_INSTR_LABEL(instr); 3583 break; 3584 case DIF_OP_BNE: 3585 if (cc_z == 0) 3586 pc = DIF_INSTR_LABEL(instr); 3587 break; 3588 case DIF_OP_BG: 3589 if ((cc_z | (cc_n ^ cc_v)) == 0) 3590 pc = DIF_INSTR_LABEL(instr); 3591 break; 3592 case DIF_OP_BGU: 3593 if ((cc_c | cc_z) == 0) 3594 pc = DIF_INSTR_LABEL(instr); 3595 break; 3596 case DIF_OP_BGE: 3597 if ((cc_n ^ cc_v) == 0) 3598 pc = DIF_INSTR_LABEL(instr); 3599 break; 3600 case DIF_OP_BGEU: 3601 if (cc_c == 0) 3602 pc = DIF_INSTR_LABEL(instr); 3603 break; 3604 case DIF_OP_BL: 3605 if (cc_n ^ cc_v) 3606 pc = DIF_INSTR_LABEL(instr); 3607 break; 3608 case DIF_OP_BLU: 3609 if (cc_c) 3610 pc = DIF_INSTR_LABEL(instr); 3611 break; 3612 case DIF_OP_BLE: 3613 if (cc_z | (cc_n ^ cc_v)) 3614 pc = DIF_INSTR_LABEL(instr); 3615 break; 3616 case DIF_OP_BLEU: 3617 if (cc_c | cc_z) 3618 pc = DIF_INSTR_LABEL(instr); 3619 break; 3620 case DIF_OP_RLDSB: 3621 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3622 *flags |= CPU_DTRACE_KPRIV; 3623 *illval = regs[r1]; 3624 break; 3625 } 3626 /*FALLTHROUGH*/ 3627 case DIF_OP_LDSB: 3628 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 3629 break; 3630 case DIF_OP_RLDSH: 3631 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3632 *flags |= CPU_DTRACE_KPRIV; 3633 *illval = regs[r1]; 3634 break; 3635 } 3636 /*FALLTHROUGH*/ 3637 case DIF_OP_LDSH: 3638 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 3639 break; 3640 case DIF_OP_RLDSW: 3641 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3642 *flags |= CPU_DTRACE_KPRIV; 3643 *illval = regs[r1]; 3644 break; 3645 } 3646 /*FALLTHROUGH*/ 3647 case DIF_OP_LDSW: 3648 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 3649 break; 3650 case DIF_OP_RLDUB: 3651 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3652 *flags |= CPU_DTRACE_KPRIV; 3653 *illval = regs[r1]; 3654 break; 3655 } 3656 /*FALLTHROUGH*/ 3657 case DIF_OP_LDUB: 3658 regs[rd] = dtrace_load8(regs[r1]); 3659 break; 3660 case DIF_OP_RLDUH: 3661 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3662 *flags |= CPU_DTRACE_KPRIV; 3663 *illval = regs[r1]; 3664 break; 3665 } 3666 /*FALLTHROUGH*/ 3667 case DIF_OP_LDUH: 3668 regs[rd] = dtrace_load16(regs[r1]); 3669 break; 3670 case DIF_OP_RLDUW: 3671 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3672 *flags |= CPU_DTRACE_KPRIV; 3673 *illval = regs[r1]; 3674 break; 3675 } 3676 /*FALLTHROUGH*/ 3677 case DIF_OP_LDUW: 3678 regs[rd] = dtrace_load32(regs[r1]); 3679 break; 3680 case DIF_OP_RLDX: 3681 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 3682 *flags |= CPU_DTRACE_KPRIV; 3683 *illval = regs[r1]; 3684 break; 3685 } 3686 /*FALLTHROUGH*/ 3687 case DIF_OP_LDX: 3688 regs[rd] = dtrace_load64(regs[r1]); 3689 break; 
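		/*
		 * The ULD* variants below are the user-space analogues of
		 * the LD* loads above, fetching through dtrace_fuwordN()
		 * rather than dtrace_loadN().
		 */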
3690 case DIF_OP_ULDSB: 3691 regs[rd] = (int8_t) 3692 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3693 break; 3694 case DIF_OP_ULDSH: 3695 regs[rd] = (int16_t) 3696 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3697 break; 3698 case DIF_OP_ULDSW: 3699 regs[rd] = (int32_t) 3700 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3701 break; 3702 case DIF_OP_ULDUB: 3703 regs[rd] = 3704 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3705 break; 3706 case DIF_OP_ULDUH: 3707 regs[rd] = 3708 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3709 break; 3710 case DIF_OP_ULDUW: 3711 regs[rd] = 3712 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3713 break; 3714 case DIF_OP_ULDX: 3715 regs[rd] = 3716 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 3717 break; 3718 case DIF_OP_RET: 3719 rval = regs[rd]; 3720 break; 3721 case DIF_OP_NOP: 3722 break; 3723 case DIF_OP_SETX: 3724 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 3725 break; 3726 case DIF_OP_SETS: 3727 regs[rd] = (uint64_t)(uintptr_t) 3728 (strtab + DIF_INSTR_STRING(instr)); 3729 break; 3730 case DIF_OP_SCMP: 3731 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1], 3732 (char *)(uintptr_t)regs[r2], 3733 state->dts_options[DTRACEOPT_STRSIZE]); 3734 3735 cc_n = cc_r < 0; 3736 cc_z = cc_r == 0; 3737 cc_v = cc_c = 0; 3738 break; 3739 case DIF_OP_LDGA: 3740 regs[rd] = dtrace_dif_variable(mstate, state, 3741 r1, regs[r2]); 3742 break; 3743 case DIF_OP_LDGS: 3744 id = DIF_INSTR_VAR(instr); 3745 3746 if (id >= DIF_VAR_OTHER_UBASE) { 3747 uintptr_t a; 3748 3749 id -= DIF_VAR_OTHER_UBASE; 3750 svar = vstate->dtvs_globals[id]; 3751 ASSERT(svar != NULL); 3752 v = &svar->dtsv_var; 3753 3754 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 3755 regs[rd] = svar->dtsv_data; 3756 break; 3757 } 3758 3759 a = (uintptr_t)svar->dtsv_data; 3760 3761 if (*(uint8_t *)a == UINT8_MAX) { 3762 /* 3763 * If the 0th byte is set to UINT8_MAX 3764 * then this is to be treated as a 3765 * reference to a NULL variable. 3766 */ 3767 regs[rd] = NULL; 3768 } else { 3769 regs[rd] = a + sizeof (uint64_t); 3770 } 3771 3772 break; 3773 } 3774 3775 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 3776 break; 3777 3778 case DIF_OP_STGS: 3779 id = DIF_INSTR_VAR(instr); 3780 3781 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3782 id -= DIF_VAR_OTHER_UBASE; 3783 3784 svar = vstate->dtvs_globals[id]; 3785 ASSERT(svar != NULL); 3786 v = &svar->dtsv_var; 3787 3788 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3789 uintptr_t a = (uintptr_t)svar->dtsv_data; 3790 3791 ASSERT(a != NULL); 3792 ASSERT(svar->dtsv_size != 0); 3793 3794 if (regs[rd] == NULL) { 3795 *(uint8_t *)a = UINT8_MAX; 3796 break; 3797 } else { 3798 *(uint8_t *)a = 0; 3799 a += sizeof (uint64_t); 3800 } 3801 3802 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3803 (void *)a, &v->dtdv_type); 3804 break; 3805 } 3806 3807 svar->dtsv_data = regs[rd]; 3808 break; 3809 3810 case DIF_OP_LDTA: 3811 /* 3812 * There are no DTrace built-in thread-local arrays at 3813 * present. This opcode is saved for future work. 3814 */ 3815 *flags |= CPU_DTRACE_ILLOP; 3816 regs[rd] = 0; 3817 break; 3818 3819 case DIF_OP_LDLS: 3820 id = DIF_INSTR_VAR(instr); 3821 3822 if (id < DIF_VAR_OTHER_UBASE) { 3823 /* 3824 * For now, this has no meaning. 
3825 */ 3826 regs[rd] = 0; 3827 break; 3828 } 3829 3830 id -= DIF_VAR_OTHER_UBASE; 3831 3832 ASSERT(id < vstate->dtvs_nlocals); 3833 ASSERT(vstate->dtvs_locals != NULL); 3834 3835 svar = vstate->dtvs_locals[id]; 3836 ASSERT(svar != NULL); 3837 v = &svar->dtsv_var; 3838 3839 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3840 uintptr_t a = (uintptr_t)svar->dtsv_data; 3841 size_t sz = v->dtdv_type.dtdt_size; 3842 3843 sz += sizeof (uint64_t); 3844 ASSERT(svar->dtsv_size == NCPU * sz); 3845 a += CPU->cpu_id * sz; 3846 3847 if (*(uint8_t *)a == UINT8_MAX) { 3848 /* 3849 * If the 0th byte is set to UINT8_MAX 3850 * then this is to be treated as a 3851 * reference to a NULL variable. 3852 */ 3853 regs[rd] = NULL; 3854 } else { 3855 regs[rd] = a + sizeof (uint64_t); 3856 } 3857 3858 break; 3859 } 3860 3861 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3862 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 3863 regs[rd] = tmp[CPU->cpu_id]; 3864 break; 3865 3866 case DIF_OP_STLS: 3867 id = DIF_INSTR_VAR(instr); 3868 3869 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3870 id -= DIF_VAR_OTHER_UBASE; 3871 ASSERT(id < vstate->dtvs_nlocals); 3872 3873 ASSERT(vstate->dtvs_locals != NULL); 3874 svar = vstate->dtvs_locals[id]; 3875 ASSERT(svar != NULL); 3876 v = &svar->dtsv_var; 3877 3878 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3879 uintptr_t a = (uintptr_t)svar->dtsv_data; 3880 size_t sz = v->dtdv_type.dtdt_size; 3881 3882 sz += sizeof (uint64_t); 3883 ASSERT(svar->dtsv_size == NCPU * sz); 3884 a += CPU->cpu_id * sz; 3885 3886 if (regs[rd] == NULL) { 3887 *(uint8_t *)a = UINT8_MAX; 3888 break; 3889 } else { 3890 *(uint8_t *)a = 0; 3891 a += sizeof (uint64_t); 3892 } 3893 3894 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3895 (void *)a, &v->dtdv_type); 3896 break; 3897 } 3898 3899 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3900 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 3901 tmp[CPU->cpu_id] = regs[rd]; 3902 break; 3903 3904 case DIF_OP_LDTS: { 3905 dtrace_dynvar_t *dvar; 3906 dtrace_key_t *key; 3907 3908 id = DIF_INSTR_VAR(instr); 3909 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3910 id -= DIF_VAR_OTHER_UBASE; 3911 v = &vstate->dtvs_tlocals[id]; 3912 3913 key = &tupregs[DIF_DTR_NREGS]; 3914 key[0].dttk_value = (uint64_t)id; 3915 key[0].dttk_size = 0; 3916 DTRACE_TLS_THRKEY(key[1].dttk_value); 3917 key[1].dttk_size = 0; 3918 3919 dvar = dtrace_dynvar(dstate, 2, key, 3920 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC); 3921 3922 if (dvar == NULL) { 3923 regs[rd] = 0; 3924 break; 3925 } 3926 3927 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3928 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 3929 } else { 3930 regs[rd] = *((uint64_t *)dvar->dtdv_data); 3931 } 3932 3933 break; 3934 } 3935 3936 case DIF_OP_STTS: { 3937 dtrace_dynvar_t *dvar; 3938 dtrace_key_t *key; 3939 3940 id = DIF_INSTR_VAR(instr); 3941 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3942 id -= DIF_VAR_OTHER_UBASE; 3943 3944 key = &tupregs[DIF_DTR_NREGS]; 3945 key[0].dttk_value = (uint64_t)id; 3946 key[0].dttk_size = 0; 3947 DTRACE_TLS_THRKEY(key[1].dttk_value); 3948 key[1].dttk_size = 0; 3949 v = &vstate->dtvs_tlocals[id]; 3950 3951 dvar = dtrace_dynvar(dstate, 2, key, 3952 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 3953 v->dtdv_type.dtdt_size : sizeof (uint64_t), 3954 regs[rd] ? DTRACE_DYNVAR_ALLOC : 3955 DTRACE_DYNVAR_DEALLOC); 3956 3957 /* 3958 * Given that we're storing to thread-local data, 3959 * we need to flush our predicate cache. 
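 * (Roughly: a predicate that depends only on thread-local state may be
 * cached per-thread, and a cached "false" could otherwise allow this
 * thread to skip the enabling even though the store above may have
 * just changed the predicate's inputs.)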
3960 */ 3961 curthread->t_predcache = NULL; 3962 3963 if (dvar == NULL) 3964 break; 3965 3966 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3967 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3968 dvar->dtdv_data, &v->dtdv_type); 3969 } else { 3970 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 3971 } 3972 3973 break; 3974 } 3975 3976 case DIF_OP_SRA: 3977 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 3978 break; 3979 3980 case DIF_OP_CALL: 3981 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 3982 regs, tupregs, ttop, mstate, state); 3983 break; 3984 3985 case DIF_OP_PUSHTR: 3986 if (ttop == DIF_DTR_NREGS) { 3987 *flags |= CPU_DTRACE_TUPOFLOW; 3988 break; 3989 } 3990 3991 if (r1 == DIF_TYPE_STRING) { 3992 /* 3993 * If this is a string type and the size is 0, 3994 * we'll use the system-wide default string 3995 * size. Note that we are _not_ looking at 3996 * the value of the DTRACEOPT_STRSIZE option; 3997 * had this been set, we would expect to have 3998 * a non-zero size value in the "pushtr". 3999 */ 4000 tupregs[ttop].dttk_size = 4001 dtrace_strlen((char *)(uintptr_t)regs[rd], 4002 regs[r2] ? regs[r2] : 4003 dtrace_strsize_default) + 1; 4004 } else { 4005 tupregs[ttop].dttk_size = regs[r2]; 4006 } 4007 4008 tupregs[ttop++].dttk_value = regs[rd]; 4009 break; 4010 4011 case DIF_OP_PUSHTV: 4012 if (ttop == DIF_DTR_NREGS) { 4013 *flags |= CPU_DTRACE_TUPOFLOW; 4014 break; 4015 } 4016 4017 tupregs[ttop].dttk_value = regs[rd]; 4018 tupregs[ttop++].dttk_size = 0; 4019 break; 4020 4021 case DIF_OP_POPTS: 4022 if (ttop != 0) 4023 ttop--; 4024 break; 4025 4026 case DIF_OP_FLUSHTS: 4027 ttop = 0; 4028 break; 4029 4030 case DIF_OP_LDGAA: 4031 case DIF_OP_LDTAA: { 4032 dtrace_dynvar_t *dvar; 4033 dtrace_key_t *key = tupregs; 4034 uint_t nkeys = ttop; 4035 4036 id = DIF_INSTR_VAR(instr); 4037 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4038 id -= DIF_VAR_OTHER_UBASE; 4039 4040 key[nkeys].dttk_value = (uint64_t)id; 4041 key[nkeys++].dttk_size = 0; 4042 4043 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4044 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4045 key[nkeys++].dttk_size = 0; 4046 v = &vstate->dtvs_tlocals[id]; 4047 } else { 4048 v = &vstate->dtvs_globals[id]->dtsv_var; 4049 } 4050 4051 dvar = dtrace_dynvar(dstate, nkeys, key, 4052 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4053 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4054 DTRACE_DYNVAR_NOALLOC); 4055 4056 if (dvar == NULL) { 4057 regs[rd] = 0; 4058 break; 4059 } 4060 4061 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4062 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4063 } else { 4064 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4065 } 4066 4067 break; 4068 } 4069 4070 case DIF_OP_STGAA: 4071 case DIF_OP_STTAA: { 4072 dtrace_dynvar_t *dvar; 4073 dtrace_key_t *key = tupregs; 4074 uint_t nkeys = ttop; 4075 4076 id = DIF_INSTR_VAR(instr); 4077 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4078 id -= DIF_VAR_OTHER_UBASE; 4079 4080 key[nkeys].dttk_value = (uint64_t)id; 4081 key[nkeys++].dttk_size = 0; 4082 4083 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4084 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4085 key[nkeys++].dttk_size = 0; 4086 v = &vstate->dtvs_tlocals[id]; 4087 } else { 4088 v = &vstate->dtvs_globals[id]->dtsv_var; 4089 } 4090 4091 dvar = dtrace_dynvar(dstate, nkeys, key, 4092 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4093 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4094 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4095 DTRACE_DYNVAR_DEALLOC); 4096 4097 if (dvar == NULL) 4098 break; 4099 4100 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4101 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4102 dvar->dtdv_data, &v->dtdv_type); 4103 } else { 4104 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4105 } 4106 4107 break; 4108 } 4109 4110 case DIF_OP_ALLOCS: { 4111 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4112 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4113 4114 if (mstate->dtms_scratch_ptr + size > 4115 mstate->dtms_scratch_base + 4116 mstate->dtms_scratch_size) { 4117 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4118 regs[rd] = NULL; 4119 } else { 4120 dtrace_bzero((void *) 4121 mstate->dtms_scratch_ptr, size); 4122 mstate->dtms_scratch_ptr += size; 4123 regs[rd] = ptr; 4124 } 4125 break; 4126 } 4127 4128 case DIF_OP_COPYS: 4129 if (!dtrace_canstore(regs[rd], regs[r2], 4130 mstate, vstate)) { 4131 *flags |= CPU_DTRACE_BADADDR; 4132 *illval = regs[rd]; 4133 break; 4134 } 4135 4136 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4137 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4138 break; 4139 4140 case DIF_OP_STB: 4141 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4142 *flags |= CPU_DTRACE_BADADDR; 4143 *illval = regs[rd]; 4144 break; 4145 } 4146 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4147 break; 4148 4149 case DIF_OP_STH: 4150 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4151 *flags |= CPU_DTRACE_BADADDR; 4152 *illval = regs[rd]; 4153 break; 4154 } 4155 if (regs[rd] & 1) { 4156 *flags |= CPU_DTRACE_BADALIGN; 4157 *illval = regs[rd]; 4158 break; 4159 } 4160 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4161 break; 4162 4163 case DIF_OP_STW: 4164 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4165 *flags |= CPU_DTRACE_BADADDR; 4166 *illval = regs[rd]; 4167 break; 4168 } 4169 if (regs[rd] & 3) { 4170 *flags |= CPU_DTRACE_BADALIGN; 4171 *illval = regs[rd]; 4172 break; 4173 } 4174 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4175 break; 4176 4177 case DIF_OP_STX: 4178 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4179 *flags |= CPU_DTRACE_BADADDR; 4180 *illval = regs[rd]; 4181 break; 4182 } 4183 if (regs[rd] & 7) { 4184 *flags |= CPU_DTRACE_BADALIGN; 4185 *illval = regs[rd]; 4186 break; 4187 } 4188 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4189 break; 4190 } 4191 } 4192 4193 if (!(*flags & CPU_DTRACE_FAULT)) 4194 return (rval); 4195 4196 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4197 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4198 4199 return (0); 4200 } 4201 4202 static void 4203 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4204 { 4205 dtrace_probe_t *probe = ecb->dte_probe; 4206 dtrace_provider_t *prov = probe->dtpr_provider; 4207 char c[DTRACE_FULLNAMELEN + 80], *str; 4208 char *msg = "dtrace: breakpoint action at probe "; 4209 char *ecbmsg = " (ecb "; 4210 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4211 uintptr_t val = (uintptr_t)ecb; 4212 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4213 4214 if (dtrace_destructive_disallow) 4215 return; 4216 4217 /* 4218 * It's impossible to be taking action on the NULL probe. 4219 */ 4220 ASSERT(probe != NULL); 4221 4222 /* 4223 * This is a poor man's (destitute man's?) sprintf(): we want to 4224 * print the provider name, module name, function name and name of 4225 * the probe, along with the hex address of the ECB with the breakpoint 4226 * action -- all of which we must place in the character buffer by 4227 * hand. 
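 * For example, were an ECB for the probe fbt:genunix:open:entry to
 * reside at the (entirely hypothetical) address 0x30002fb8000, the
 * string handed to debug_enter() would be:
 *
 *	dtrace: breakpoint action at probe fbt:genunix:open:entry (ecb 30002fb8000)
 *
 * (Leading zero nibbles of the ECB address are elided by the
 * conversion loop below.)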
4228 */ 4229 while (*msg != '\0') 4230 c[i++] = *msg++; 4231 4232 for (str = prov->dtpv_name; *str != '\0'; str++) 4233 c[i++] = *str; 4234 c[i++] = ':'; 4235 4236 for (str = probe->dtpr_mod; *str != '\0'; str++) 4237 c[i++] = *str; 4238 c[i++] = ':'; 4239 4240 for (str = probe->dtpr_func; *str != '\0'; str++) 4241 c[i++] = *str; 4242 c[i++] = ':'; 4243 4244 for (str = probe->dtpr_name; *str != '\0'; str++) 4245 c[i++] = *str; 4246 4247 while (*ecbmsg != '\0') 4248 c[i++] = *ecbmsg++; 4249 4250 while (shift >= 0) { 4251 mask = (uintptr_t)0xf << shift; 4252 4253 if (val >= ((uintptr_t)1 << shift)) 4254 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4255 shift -= 4; 4256 } 4257 4258 c[i++] = ')'; 4259 c[i] = '\0'; 4260 4261 debug_enter(c); 4262 } 4263 4264 static void 4265 dtrace_action_panic(dtrace_ecb_t *ecb) 4266 { 4267 dtrace_probe_t *probe = ecb->dte_probe; 4268 4269 /* 4270 * It's impossible to be taking action on the NULL probe. 4271 */ 4272 ASSERT(probe != NULL); 4273 4274 if (dtrace_destructive_disallow) 4275 return; 4276 4277 if (dtrace_panicked != NULL) 4278 return; 4279 4280 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4281 return; 4282 4283 /* 4284 * We won the right to panic. (We want to be sure that only one 4285 * thread calls panic() from dtrace_probe(), and that panic() is 4286 * called exactly once.) 4287 */ 4288 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4289 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4290 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4291 } 4292 4293 static void 4294 dtrace_action_raise(uint64_t sig) 4295 { 4296 if (dtrace_destructive_disallow) 4297 return; 4298 4299 if (sig >= NSIG) { 4300 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4301 return; 4302 } 4303 4304 /* 4305 * raise() has a queue depth of 1 -- we ignore all subsequent 4306 * invocations of the raise() action. 4307 */ 4308 if (curthread->t_dtrace_sig == 0) 4309 curthread->t_dtrace_sig = (uint8_t)sig; 4310 4311 curthread->t_sig_check = 1; 4312 aston(curthread); 4313 } 4314 4315 static void 4316 dtrace_action_stop(void) 4317 { 4318 if (dtrace_destructive_disallow) 4319 return; 4320 4321 if (!curthread->t_dtrace_stop) { 4322 curthread->t_dtrace_stop = 1; 4323 curthread->t_sig_check = 1; 4324 aston(curthread); 4325 } 4326 } 4327 4328 static void 4329 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4330 { 4331 hrtime_t now; 4332 volatile uint16_t *flags; 4333 cpu_t *cpu = CPU; 4334 4335 if (dtrace_destructive_disallow) 4336 return; 4337 4338 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4339 4340 now = dtrace_gethrtime(); 4341 4342 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4343 /* 4344 * We need to advance the mark to the current time. 4345 */ 4346 cpu->cpu_dtrace_chillmark = now; 4347 cpu->cpu_dtrace_chilled = 0; 4348 } 4349 4350 /* 4351 * Now check to see if the requested chill time would take us over 4352 * the maximum amount of time allowed in the chill interval. (Or 4353 * worse, if the calculation itself induces overflow.) 4354 */ 4355 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4356 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4357 *flags |= CPU_DTRACE_ILLOP; 4358 return; 4359 } 4360 4361 while (dtrace_gethrtime() - now < val) 4362 continue; 4363 4364 /* 4365 * Normally, we assure that the value of the variable "timestamp" does 4366 * not change within an ECB. The presence of chill() represents an 4367 * exception to this rule, however. 
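 * Because chill() spins for the requested interval, a clause that
 * reads "timestamp" both before and after chilling would otherwise
 * observe the same cached value for both reads; clearing
 * DTRACE_MSTATE_TIMESTAMP from the machine state forces the later
 * read to be recomputed.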
4368 */ 4369 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 4370 cpu->cpu_dtrace_chilled += val; 4371 } 4372 4373 static void 4374 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 4375 uint64_t *buf, uint64_t arg) 4376 { 4377 int nframes = DTRACE_USTACK_NFRAMES(arg); 4378 int strsize = DTRACE_USTACK_STRSIZE(arg); 4379 uint64_t *pcs = &buf[1], *fps; 4380 char *str = (char *)&pcs[nframes]; 4381 int size, offs = 0, i, j; 4382 uintptr_t old = mstate->dtms_scratch_ptr, saved; 4383 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4384 char *sym; 4385 4386 /* 4387 * Should be taking a faster path if string space has not been 4388 * allocated. 4389 */ 4390 ASSERT(strsize != 0); 4391 4392 /* 4393 * We will first allocate some temporary space for the frame pointers. 4394 */ 4395 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4396 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 4397 (nframes * sizeof (uint64_t)); 4398 4399 if (mstate->dtms_scratch_ptr + size > 4400 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 4401 /* 4402 * Not enough room for our frame pointers -- need to indicate 4403 * that we ran out of scratch space. 4404 */ 4405 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4406 return; 4407 } 4408 4409 mstate->dtms_scratch_ptr += size; 4410 saved = mstate->dtms_scratch_ptr; 4411 4412 /* 4413 * Now get a stack with both program counters and frame pointers. 4414 */ 4415 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4416 dtrace_getufpstack(buf, fps, nframes + 1); 4417 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4418 4419 /* 4420 * If that faulted, we're cooked. 4421 */ 4422 if (*flags & CPU_DTRACE_FAULT) 4423 goto out; 4424 4425 /* 4426 * Now we want to walk up the stack, calling the USTACK helper. For 4427 * each iteration, we restore the scratch pointer. 4428 */ 4429 for (i = 0; i < nframes; i++) { 4430 mstate->dtms_scratch_ptr = saved; 4431 4432 if (offs >= strsize) 4433 break; 4434 4435 sym = (char *)(uintptr_t)dtrace_helper( 4436 DTRACE_HELPER_ACTION_USTACK, 4437 mstate, state, pcs[i], fps[i]); 4438 4439 /* 4440 * If we faulted while running the helper, we're going to 4441 * clear the fault and null out the corresponding string. 4442 */ 4443 if (*flags & CPU_DTRACE_FAULT) { 4444 *flags &= ~CPU_DTRACE_FAULT; 4445 str[offs++] = '\0'; 4446 continue; 4447 } 4448 4449 if (sym == NULL) { 4450 str[offs++] = '\0'; 4451 continue; 4452 } 4453 4454 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4455 4456 /* 4457 * Now copy in the string that the helper returned to us. 4458 */ 4459 for (j = 0; offs + j < strsize; j++) { 4460 if ((str[offs + j] = sym[j]) == '\0') 4461 break; 4462 } 4463 4464 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4465 4466 offs += j + 1; 4467 } 4468 4469 if (offs >= strsize) { 4470 /* 4471 * If we didn't have room for all of the strings, we don't 4472 * abort processing -- this needn't be a fatal error -- but we 4473 * still want to increment a counter (dts_stkstroverflows) to 4474 * allow this condition to be warned about. (If this is from 4475 * a jstack() action, it is easily tuned via jstackstrsize.) 4476 */ 4477 dtrace_error(&state->dts_stkstroverflows); 4478 } 4479 4480 while (offs < strsize) 4481 str[offs++] = '\0'; 4482 4483 out: 4484 mstate->dtms_scratch_ptr = old; 4485 } 4486 4487 /* 4488 * If you're looking for the epicenter of DTrace, you just found it. This 4489 * is the function called by the provider to fire a probe -- from which all 4490 * subsequent probe-context DTrace activity emanates. 
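 * A provider fires a probe by calling this function from its
 * instrumented code path.  A minimal sketch (the enabled check, the
 * probe identifier and the arguments here are placeholders; a real
 * provider stashes the identifier returned by dtrace_probe_create()
 * and passes whatever arguments the probe advertises):
 *
 *	if (my_probe_enabled)
 *		dtrace_probe(my_probe_id, (uintptr_t)arg0,
 *		    (uintptr_t)arg1, 0, 0, 0);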
4491 */ 4492 void 4493 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 4494 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 4495 { 4496 processorid_t cpuid; 4497 dtrace_icookie_t cookie; 4498 dtrace_probe_t *probe; 4499 dtrace_mstate_t mstate; 4500 dtrace_ecb_t *ecb; 4501 dtrace_action_t *act; 4502 intptr_t offs; 4503 size_t size; 4504 int vtime, onintr; 4505 volatile uint16_t *flags; 4506 hrtime_t now; 4507 4508 /* 4509 * Kick out immediately if this CPU is still being born (in which case 4510 * curthread will be set to -1) 4511 */ 4512 if ((uintptr_t)curthread & 1) 4513 return; 4514 4515 cookie = dtrace_interrupt_disable(); 4516 probe = dtrace_probes[id - 1]; 4517 cpuid = CPU->cpu_id; 4518 onintr = CPU_ON_INTR(CPU); 4519 4520 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 4521 probe->dtpr_predcache == curthread->t_predcache) { 4522 /* 4523 * We have hit in the predicate cache; we know that 4524 * this predicate would evaluate to be false. 4525 */ 4526 dtrace_interrupt_enable(cookie); 4527 return; 4528 } 4529 4530 if (panic_quiesce) { 4531 /* 4532 * We don't trace anything if we're panicking. 4533 */ 4534 dtrace_interrupt_enable(cookie); 4535 return; 4536 } 4537 4538 now = dtrace_gethrtime(); 4539 vtime = dtrace_vtime_references != 0; 4540 4541 if (vtime && curthread->t_dtrace_start) 4542 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 4543 4544 mstate.dtms_probe = probe; 4545 mstate.dtms_arg[0] = arg0; 4546 mstate.dtms_arg[1] = arg1; 4547 mstate.dtms_arg[2] = arg2; 4548 mstate.dtms_arg[3] = arg3; 4549 mstate.dtms_arg[4] = arg4; 4550 4551 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 4552 4553 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 4554 dtrace_predicate_t *pred = ecb->dte_predicate; 4555 dtrace_state_t *state = ecb->dte_state; 4556 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 4557 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 4558 dtrace_vstate_t *vstate = &state->dts_vstate; 4559 dtrace_provider_t *prov = probe->dtpr_provider; 4560 int committed = 0; 4561 caddr_t tomax; 4562 4563 /* 4564 * A little subtlety with the following (seemingly innocuous) 4565 * declaration of the automatic 'val': by looking at the 4566 * code, you might think that it could be declared in the 4567 * action processing loop, below. (That is, it's only used in 4568 * the action processing loop.) However, it must be declared 4569 * out of that scope because in the case of DIF expression 4570 * arguments to aggregating actions, one iteration of the 4571 * action loop will use the last iteration's value. 4572 */ 4573 #ifdef lint 4574 uint64_t val = 0; 4575 #else 4576 uint64_t val; 4577 #endif 4578 4579 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 4580 *flags &= ~CPU_DTRACE_ERROR; 4581 4582 if (prov == dtrace_provider) { 4583 /* 4584 * If dtrace itself is the provider of this probe, 4585 * we're only going to continue processing the ECB if 4586 * arg0 (the dtrace_state_t) is equal to the ECB's 4587 * creating state. (This prevents disjoint consumers 4588 * from seeing one another's metaprobes.) 4589 */ 4590 if (arg0 != (uint64_t)(uintptr_t)state) 4591 continue; 4592 } 4593 4594 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 4595 /* 4596 * We're not currently active. If our provider isn't 4597 * the dtrace pseudo provider, we're not interested. 4598 */ 4599 if (prov != dtrace_provider) 4600 continue; 4601 4602 /* 4603 * Now we must further check if we are in the BEGIN 4604 * probe. 
If we are, we will only continue processing 4605 * if we're still in WARMUP -- if one BEGIN enabling 4606 * has invoked the exit() action, we don't want to 4607 * evaluate subsequent BEGIN enablings. 4608 */ 4609 if (probe->dtpr_id == dtrace_probeid_begin && 4610 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 4611 ASSERT(state->dts_activity == 4612 DTRACE_ACTIVITY_DRAINING); 4613 continue; 4614 } 4615 } 4616 4617 if (ecb->dte_cond) { 4618 /* 4619 * If the dte_cond bits indicate that this 4620 * consumer is only allowed to see user-mode firings 4621 * of this probe, call the provider's dtps_usermode() 4622 * entry point to check that the probe was fired 4623 * while in a user context. Skip this ECB if that's 4624 * not the case. 4625 */ 4626 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 4627 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 4628 probe->dtpr_id, probe->dtpr_arg) == 0) 4629 continue; 4630 4631 /* 4632 * This is more subtle than it looks. We have to be 4633 * absolutely certain that CRED() isn't going to 4634 * change out from under us, so it's only legit to 4635 * examine that structure if we're in constrained 4636 * situations. Currently, the only time we'll do this 4637 * check is if a non-super-user has enabled the 4638 * profile or syscall providers -- providers that 4639 * allow visibility of all processes. For the 4640 * profile case, the check above will ensure that 4641 * we're examining a user context. 4642 */ 4643 if (ecb->dte_cond & DTRACE_COND_OWNER) { 4644 uid_t uid = ecb->dte_state->dts_cred.dcr_uid; 4645 gid_t gid = ecb->dte_state->dts_cred.dcr_gid; 4646 cred_t *cr; 4647 proc_t *proc; 4648 4649 if ((cr = CRED()) == NULL || 4650 uid != cr->cr_uid || 4651 uid != cr->cr_ruid || 4652 uid != cr->cr_suid || 4653 gid != cr->cr_gid || 4654 gid != cr->cr_rgid || 4655 gid != cr->cr_sgid || 4656 (proc = ttoproc(curthread)) == NULL || 4657 (proc->p_flag & SNOCD)) 4658 continue; 4659 4660 } 4661 } 4662 4663 if (now - state->dts_alive > dtrace_deadman_timeout) { 4664 /* 4665 * We seem to be dead. Unless we (a) have kernel 4666 * destructive permissions, (b) have explicitly enabled 4667 * destructive actions, and (c) destructive actions have 4668 * not been disabled, we're going to transition into 4669 * the KILLED state, from which no further processing 4670 * on this state will be performed. 4671 */ 4672 if (!dtrace_priv_kernel_destructive(state) || 4673 !state->dts_cred.dcr_destructive || 4674 dtrace_destructive_disallow) { 4675 void *activity = &state->dts_activity; 4676 dtrace_activity_t current; 4677 4678 do { 4679 current = state->dts_activity; 4680 } while (dtrace_cas32(activity, current, 4681 DTRACE_ACTIVITY_KILLED) != current); 4682 4683 continue; 4684 } 4685 } 4686 4687 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 4688 ecb->dte_alignment, state, &mstate)) < 0) 4689 continue; 4690 4691 tomax = buf->dtb_tomax; 4692 ASSERT(tomax != NULL); 4693 4694 if (ecb->dte_size != 0) 4695 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 4696 4697 mstate.dtms_epid = ecb->dte_epid; 4698 mstate.dtms_present |= DTRACE_MSTATE_EPID; 4699 4700 if (pred != NULL) { 4701 dtrace_difo_t *dp = pred->dtp_difo; 4702 int rval; 4703 4704 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 4705 4706 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 4707 dtrace_cacheid_t cid = probe->dtpr_predcache; 4708 4709 if (cid != DTRACE_CACHEIDNONE && !onintr) { 4710 /* 4711 * Update the predicate cache...
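 * (t_predcache is consulted at the top of dtrace_probe(): a thread
 * whose cached identifier matches the probe's dtpr_predcache may
 * dismiss the firing outright, without reserving buffer space or
 * re-evaluating the predicate.)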
4712 */ 4713 ASSERT(cid == pred->dtp_cacheid); 4714 curthread->t_predcache = cid; 4715 } 4716 4717 continue; 4718 } 4719 } 4720 4721 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 4722 act != NULL; act = act->dta_next) { 4723 size_t valoffs; 4724 dtrace_difo_t *dp; 4725 dtrace_recdesc_t *rec = &act->dta_rec; 4726 4727 size = rec->dtrd_size; 4728 valoffs = offs + rec->dtrd_offset; 4729 4730 if (DTRACEACT_ISAGG(act->dta_kind)) { 4731 uint64_t v = 0xbad; 4732 dtrace_aggregation_t *agg; 4733 4734 agg = (dtrace_aggregation_t *)act; 4735 4736 if ((dp = act->dta_difo) != NULL) 4737 v = dtrace_dif_emulate(dp, 4738 &mstate, vstate, state); 4739 4740 if (*flags & CPU_DTRACE_ERROR) 4741 continue; 4742 4743 /* 4744 * Note that we always pass the expression 4745 * value from the previous iteration of the 4746 * action loop. This value will only be used 4747 * if there is an expression argument to the 4748 * aggregating action, denoted by the 4749 * dtag_hasarg field. 4750 */ 4751 dtrace_aggregate(agg, buf, 4752 offs, aggbuf, v, val); 4753 continue; 4754 } 4755 4756 switch (act->dta_kind) { 4757 case DTRACEACT_STOP: 4758 if (dtrace_priv_proc_destructive(state)) 4759 dtrace_action_stop(); 4760 continue; 4761 4762 case DTRACEACT_BREAKPOINT: 4763 if (dtrace_priv_kernel_destructive(state)) 4764 dtrace_action_breakpoint(ecb); 4765 continue; 4766 4767 case DTRACEACT_PANIC: 4768 if (dtrace_priv_kernel_destructive(state)) 4769 dtrace_action_panic(ecb); 4770 continue; 4771 4772 case DTRACEACT_STACK: 4773 if (!dtrace_priv_kernel(state)) 4774 continue; 4775 4776 dtrace_getpcstack((pc_t *)(tomax + valoffs), 4777 size / sizeof (pc_t), probe->dtpr_aframes, 4778 DTRACE_ANCHORED(probe) ? NULL : 4779 (uint32_t *)arg0); 4780 4781 continue; 4782 4783 case DTRACEACT_JSTACK: 4784 case DTRACEACT_USTACK: 4785 if (!dtrace_priv_proc(state)) 4786 continue; 4787 4788 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 4789 curproc->p_dtrace_helpers != NULL) { 4790 /* 4791 * This is the slow path -- we have 4792 * allocated string space, and we're 4793 * getting the stack of a process that 4794 * has helpers. Call into a separate 4795 * routine to perform this processing. 
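 * (This is the path taken by a jstack() enabling, for example:
 * jstack() implies string space for the helper-translated frames,
 * sized by default via the jstackstrsize option.)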
4796 */ 4797 dtrace_action_ustack(&mstate, state, 4798 (uint64_t *)(tomax + valoffs), 4799 rec->dtrd_arg); 4800 continue; 4801 } 4802 4803 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4804 dtrace_getupcstack((uint64_t *) 4805 (tomax + valoffs), 4806 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 4807 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4808 continue; 4809 4810 default: 4811 break; 4812 } 4813 4814 dp = act->dta_difo; 4815 ASSERT(dp != NULL); 4816 4817 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 4818 4819 if (*flags & CPU_DTRACE_ERROR) 4820 continue; 4821 4822 switch (act->dta_kind) { 4823 case DTRACEACT_SPECULATE: 4824 ASSERT(buf == &state->dts_buffer[cpuid]); 4825 buf = dtrace_speculation_buffer(state, 4826 cpuid, val); 4827 4828 if (buf == NULL) { 4829 *flags |= CPU_DTRACE_DROP; 4830 continue; 4831 } 4832 4833 offs = dtrace_buffer_reserve(buf, 4834 ecb->dte_needed, ecb->dte_alignment, 4835 state, NULL); 4836 4837 if (offs < 0) { 4838 *flags |= CPU_DTRACE_DROP; 4839 continue; 4840 } 4841 4842 tomax = buf->dtb_tomax; 4843 ASSERT(tomax != NULL); 4844 4845 if (ecb->dte_size != 0) 4846 DTRACE_STORE(uint32_t, tomax, offs, 4847 ecb->dte_epid); 4848 continue; 4849 4850 case DTRACEACT_CHILL: 4851 if (dtrace_priv_kernel_destructive(state)) 4852 dtrace_action_chill(&mstate, val); 4853 continue; 4854 4855 case DTRACEACT_RAISE: 4856 if (dtrace_priv_proc_destructive(state)) 4857 dtrace_action_raise(val); 4858 continue; 4859 4860 case DTRACEACT_COMMIT: 4861 ASSERT(!committed); 4862 4863 /* 4864 * We need to commit our buffer state. 4865 */ 4866 if (ecb->dte_size) 4867 buf->dtb_offset = offs + ecb->dte_size; 4868 buf = &state->dts_buffer[cpuid]; 4869 dtrace_speculation_commit(state, cpuid, val); 4870 committed = 1; 4871 continue; 4872 4873 case DTRACEACT_DISCARD: 4874 dtrace_speculation_discard(state, cpuid, val); 4875 continue; 4876 4877 case DTRACEACT_DIFEXPR: 4878 case DTRACEACT_LIBACT: 4879 case DTRACEACT_PRINTF: 4880 case DTRACEACT_PRINTA: 4881 case DTRACEACT_SYSTEM: 4882 case DTRACEACT_FREOPEN: 4883 break; 4884 4885 case DTRACEACT_SYM: 4886 case DTRACEACT_MOD: 4887 if (!dtrace_priv_kernel(state)) 4888 continue; 4889 break; 4890 4891 case DTRACEACT_USYM: 4892 case DTRACEACT_UMOD: 4893 case DTRACEACT_UADDR: { 4894 struct pid *pid = curthread->t_procp->p_pidp; 4895 4896 if (!dtrace_priv_proc(state)) 4897 continue; 4898 4899 DTRACE_STORE(uint64_t, tomax, 4900 valoffs, (uint64_t)pid->pid_id); 4901 DTRACE_STORE(uint64_t, tomax, 4902 valoffs + sizeof (uint64_t), val); 4903 4904 continue; 4905 } 4906 4907 case DTRACEACT_EXIT: { 4908 /* 4909 * For the exit action, we are going to attempt 4910 * to atomically set our activity to be 4911 * draining. If this fails (either because 4912 * another CPU has beat us to the exit action, 4913 * or because our current activity is something 4914 * other than ACTIVE or WARMUP), we will 4915 * continue. This assures that the exit action 4916 * can be successfully recorded at most once 4917 * when we're in the ACTIVE state. If we're 4918 * encountering the exit() action while in 4919 * COOLDOWN, however, we want to honor the new 4920 * status code. (We know that we're the only 4921 * thread in COOLDOWN, so there is no race.) 
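 * (The status code itself -- the argument to the exit() action -- is
 * simply the expression value computed above; it is recorded through
 * the ordinary by-value store path below.)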
4922 */ 4923 void *activity = &state->dts_activity; 4924 dtrace_activity_t current = state->dts_activity; 4925 4926 if (current == DTRACE_ACTIVITY_COOLDOWN) 4927 break; 4928 4929 if (current != DTRACE_ACTIVITY_WARMUP) 4930 current = DTRACE_ACTIVITY_ACTIVE; 4931 4932 if (dtrace_cas32(activity, current, 4933 DTRACE_ACTIVITY_DRAINING) != current) { 4934 *flags |= CPU_DTRACE_DROP; 4935 continue; 4936 } 4937 4938 break; 4939 } 4940 4941 default: 4942 ASSERT(0); 4943 } 4944 4945 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 4946 uintptr_t end = valoffs + size; 4947 4948 /* 4949 * If this is a string, we're going to only 4950 * load until we find the zero byte -- after 4951 * which we'll store zero bytes. 4952 */ 4953 if (dp->dtdo_rtype.dtdt_kind == 4954 DIF_TYPE_STRING) { 4955 char c = '\0' + 1; 4956 size_t s; 4957 4958 for (s = 0; s < size; s++) { 4959 if (c != '\0') 4960 c = dtrace_load8(val++); 4961 4962 DTRACE_STORE(uint8_t, tomax, 4963 valoffs++, c); 4964 } 4965 4966 continue; 4967 } 4968 4969 while (valoffs < end) { 4970 DTRACE_STORE(uint8_t, tomax, valoffs++, 4971 dtrace_load8(val++)); 4972 } 4973 4974 continue; 4975 } 4976 4977 switch (size) { 4978 case 0: 4979 break; 4980 4981 case sizeof (uint8_t): 4982 DTRACE_STORE(uint8_t, tomax, valoffs, val); 4983 break; 4984 case sizeof (uint16_t): 4985 DTRACE_STORE(uint16_t, tomax, valoffs, val); 4986 break; 4987 case sizeof (uint32_t): 4988 DTRACE_STORE(uint32_t, tomax, valoffs, val); 4989 break; 4990 case sizeof (uint64_t): 4991 DTRACE_STORE(uint64_t, tomax, valoffs, val); 4992 break; 4993 default: 4994 /* 4995 * Any other size should have been returned by 4996 * reference, not by value. 4997 */ 4998 ASSERT(0); 4999 break; 5000 } 5001 } 5002 5003 if (*flags & CPU_DTRACE_DROP) 5004 continue; 5005 5006 if (*flags & CPU_DTRACE_FAULT) { 5007 int ndx; 5008 dtrace_action_t *err; 5009 5010 buf->dtb_errors++; 5011 5012 if (probe->dtpr_id == dtrace_probeid_error) { 5013 /* 5014 * There's nothing we can do -- we had an 5015 * error on the error probe. We bump an 5016 * error counter to at least indicate that 5017 * this condition happened. 5018 */ 5019 dtrace_error(&state->dts_dblerrors); 5020 continue; 5021 } 5022 5023 if (vtime) { 5024 /* 5025 * Before recursing on dtrace_probe(), we 5026 * need to explicitly clear out our start 5027 * time to prevent it from being accumulated 5028 * into t_dtrace_vtime. 5029 */ 5030 curthread->t_dtrace_start = 0; 5031 } 5032 5033 /* 5034 * Iterate over the actions to figure out which action 5035 * we were processing when we experienced the error. 5036 * Note that act points _past_ the faulting action; if 5037 * act is ecb->dte_action, the fault was in the 5038 * predicate, if it's ecb->dte_action->dta_next it's 5039 * in action #1, and so on. 5040 */ 5041 for (err = ecb->dte_action, ndx = 0; 5042 err != act; err = err->dta_next, ndx++) 5043 continue; 5044 5045 dtrace_probe_error(state, ecb->dte_epid, ndx, 5046 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 5047 mstate.dtms_fltoffs : -1, 5048 (*flags & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : 5049 (*flags & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : 5050 (*flags & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : 5051 (*flags & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : 5052 (*flags & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : 5053 (*flags & CPU_DTRACE_TUPOFLOW) ? 5054 DTRACEFLT_TUPOFLOW : 5055 (*flags & CPU_DTRACE_BADALIGN) ? 5056 DTRACEFLT_BADALIGN : 5057 (*flags & CPU_DTRACE_NOSCRATCH) ? 
5058 DTRACEFLT_NOSCRATCH : DTRACEFLT_UNKNOWN, 5059 cpu_core[cpuid].cpuc_dtrace_illval); 5060 5061 continue; 5062 } 5063 5064 if (!committed) 5065 buf->dtb_offset = offs + ecb->dte_size; 5066 } 5067 5068 if (vtime) 5069 curthread->t_dtrace_start = dtrace_gethrtime(); 5070 5071 dtrace_interrupt_enable(cookie); 5072 } 5073 5074 /* 5075 * DTrace Probe Hashing Functions 5076 * 5077 * The functions in this section (and indeed, the functions in remaining 5078 * sections) are not _called_ from probe context. (Any exceptions to this are 5079 * marked with a "Note:".) Rather, they are called from elsewhere in the 5080 * DTrace framework to look-up probes in, add probes to and remove probes from 5081 * the DTrace probe hashes. (Each probe is hashed by each element of the 5082 * probe tuple -- allowing for fast lookups, regardless of what was 5083 * specified.) 5084 */ 5085 static uint_t 5086 dtrace_hash_str(char *p) 5087 { 5088 unsigned int g; 5089 uint_t hval = 0; 5090 5091 while (*p) { 5092 hval = (hval << 4) + *p++; 5093 if ((g = (hval & 0xf0000000)) != 0) 5094 hval ^= g >> 24; 5095 hval &= ~g; 5096 } 5097 return (hval); 5098 } 5099 5100 static dtrace_hash_t * 5101 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5102 { 5103 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5104 5105 hash->dth_stroffs = stroffs; 5106 hash->dth_nextoffs = nextoffs; 5107 hash->dth_prevoffs = prevoffs; 5108 5109 hash->dth_size = 1; 5110 hash->dth_mask = hash->dth_size - 1; 5111 5112 hash->dth_tab = kmem_zalloc(hash->dth_size * 5113 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5114 5115 return (hash); 5116 } 5117 5118 static void 5119 dtrace_hash_destroy(dtrace_hash_t *hash) 5120 { 5121 #ifdef DEBUG 5122 int i; 5123 5124 for (i = 0; i < hash->dth_size; i++) 5125 ASSERT(hash->dth_tab[i] == NULL); 5126 #endif 5127 5128 kmem_free(hash->dth_tab, 5129 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5130 kmem_free(hash, sizeof (dtrace_hash_t)); 5131 } 5132 5133 static void 5134 dtrace_hash_resize(dtrace_hash_t *hash) 5135 { 5136 int size = hash->dth_size, i, ndx; 5137 int new_size = hash->dth_size << 1; 5138 int new_mask = new_size - 1; 5139 dtrace_hashbucket_t **new_tab, *bucket, *next; 5140 5141 ASSERT((new_size & new_mask) == 0); 5142 5143 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5144 5145 for (i = 0; i < size; i++) { 5146 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5147 dtrace_probe_t *probe = bucket->dthb_chain; 5148 5149 ASSERT(probe != NULL); 5150 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5151 5152 next = bucket->dthb_next; 5153 bucket->dthb_next = new_tab[ndx]; 5154 new_tab[ndx] = bucket; 5155 } 5156 } 5157 5158 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5159 hash->dth_tab = new_tab; 5160 hash->dth_size = new_size; 5161 hash->dth_mask = new_mask; 5162 } 5163 5164 static void 5165 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5166 { 5167 int hashval = DTRACE_HASHSTR(hash, new); 5168 int ndx = hashval & hash->dth_mask; 5169 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5170 dtrace_probe_t **nextp, **prevp; 5171 5172 for (; bucket != NULL; bucket = bucket->dthb_next) { 5173 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5174 goto add; 5175 } 5176 5177 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5178 dtrace_hash_resize(hash); 5179 dtrace_hash_add(hash, new); 5180 return; 5181 } 5182 5183 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5184 bucket->dthb_next = 
hash->dth_tab[ndx]; 5185 hash->dth_tab[ndx] = bucket; 5186 hash->dth_nbuckets++; 5187 5188 add: 5189 nextp = DTRACE_HASHNEXT(hash, new); 5190 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5191 *nextp = bucket->dthb_chain; 5192 5193 if (bucket->dthb_chain != NULL) { 5194 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5195 ASSERT(*prevp == NULL); 5196 *prevp = new; 5197 } 5198 5199 bucket->dthb_chain = new; 5200 bucket->dthb_len++; 5201 } 5202 5203 static dtrace_probe_t * 5204 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5205 { 5206 int hashval = DTRACE_HASHSTR(hash, template); 5207 int ndx = hashval & hash->dth_mask; 5208 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5209 5210 for (; bucket != NULL; bucket = bucket->dthb_next) { 5211 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5212 return (bucket->dthb_chain); 5213 } 5214 5215 return (NULL); 5216 } 5217 5218 static int 5219 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5220 { 5221 int hashval = DTRACE_HASHSTR(hash, template); 5222 int ndx = hashval & hash->dth_mask; 5223 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5224 5225 for (; bucket != NULL; bucket = bucket->dthb_next) { 5226 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5227 return (bucket->dthb_len); 5228 } 5229 5230 return (0); 5231 } 5232 5233 static void 5234 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5235 { 5236 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5237 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5238 5239 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5240 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5241 5242 /* 5243 * Find the bucket that we're removing this probe from. 5244 */ 5245 for (; bucket != NULL; bucket = bucket->dthb_next) { 5246 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5247 break; 5248 } 5249 5250 ASSERT(bucket != NULL); 5251 5252 if (*prevp == NULL) { 5253 if (*nextp == NULL) { 5254 /* 5255 * The removed probe was the only probe on this 5256 * bucket; we need to remove the bucket. 5257 */ 5258 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5259 5260 ASSERT(bucket->dthb_chain == probe); 5261 ASSERT(b != NULL); 5262 5263 if (b == bucket) { 5264 hash->dth_tab[ndx] = bucket->dthb_next; 5265 } else { 5266 while (b->dthb_next != bucket) 5267 b = b->dthb_next; 5268 b->dthb_next = bucket->dthb_next; 5269 } 5270 5271 ASSERT(hash->dth_nbuckets > 0); 5272 hash->dth_nbuckets--; 5273 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5274 return; 5275 } 5276 5277 bucket->dthb_chain = *nextp; 5278 } else { 5279 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5280 } 5281 5282 if (*nextp != NULL) 5283 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5284 } 5285 5286 /* 5287 * DTrace Utility Functions 5288 * 5289 * These are random utility functions that are _not_ called from probe context. 5290 */ 5291 static int 5292 dtrace_badattr(const dtrace_attribute_t *a) 5293 { 5294 return (a->dtat_name > DTRACE_STABILITY_MAX || 5295 a->dtat_data > DTRACE_STABILITY_MAX || 5296 a->dtat_class > DTRACE_CLASS_MAX); 5297 } 5298 5299 /* 5300 * Return a duplicate copy of a string. If the specified string is NULL, 5301 * this function returns a zero-length string. 5302 */ 5303 static char * 5304 dtrace_strdup(const char *str) 5305 { 5306 char *new = kmem_zalloc((str != NULL ?
strlen(str) : 0) + 1, KM_SLEEP); 5307 5308 if (str != NULL) 5309 (void) strcpy(new, str); 5310 5311 return (new); 5312 } 5313 5314 #define DTRACE_ISALPHA(c) \ 5315 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5316 5317 static int 5318 dtrace_badname(const char *s) 5319 { 5320 char c; 5321 5322 if (s == NULL || (c = *s++) == '\0') 5323 return (0); 5324 5325 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5326 return (1); 5327 5328 while ((c = *s++) != '\0') { 5329 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 5330 c != '-' && c != '_' && c != '.' && c != '`') 5331 return (1); 5332 } 5333 5334 return (0); 5335 } 5336 5337 static void 5338 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp) 5339 { 5340 uint32_t priv; 5341 5342 *uidp = crgetuid(cr); 5343 if (PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5344 priv = DTRACE_PRIV_ALL; 5345 } else { 5346 priv = 0; 5347 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 5348 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 5349 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 5350 priv |= DTRACE_PRIV_USER; 5351 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 5352 priv |= DTRACE_PRIV_PROC; 5353 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 5354 priv |= DTRACE_PRIV_OWNER; 5355 } 5356 5357 *privp = priv; 5358 } 5359 5360 #ifdef DTRACE_ERRDEBUG 5361 static void 5362 dtrace_errdebug(const char *str) 5363 { 5364 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 5365 int occupied = 0; 5366 5367 mutex_enter(&dtrace_errlock); 5368 dtrace_errlast = str; 5369 dtrace_errthread = curthread; 5370 5371 while (occupied++ < DTRACE_ERRHASHSZ) { 5372 if (dtrace_errhash[hval].dter_msg == str) { 5373 dtrace_errhash[hval].dter_count++; 5374 goto out; 5375 } 5376 5377 if (dtrace_errhash[hval].dter_msg != NULL) { 5378 hval = (hval + 1) % DTRACE_ERRHASHSZ; 5379 continue; 5380 } 5381 5382 dtrace_errhash[hval].dter_msg = str; 5383 dtrace_errhash[hval].dter_count = 1; 5384 goto out; 5385 } 5386 5387 panic("dtrace: undersized error hash"); 5388 out: 5389 mutex_exit(&dtrace_errlock); 5390 } 5391 #endif 5392 5393 /* 5394 * DTrace Matching Functions 5395 * 5396 * These functions are used to match groups of probes, given some elements of 5397 * a probe tuple, or some globbed expressions for elements of a probe tuple. 5398 */ 5399 static int 5400 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid) 5401 { 5402 if (priv != DTRACE_PRIV_ALL) { 5403 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 5404 uint32_t match = priv & ppriv; 5405 5406 /* 5407 * No PRIV_DTRACE_* privileges... 5408 */ 5409 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 5410 DTRACE_PRIV_KERNEL)) == 0) 5411 return (0); 5412 5413 /* 5414 * No matching bits, but there were bits to match... 5415 */ 5416 if (match == 0 && ppriv != 0) 5417 return (0); 5418 5419 /* 5420 * Need to have permissions to the process, but don't... 5421 */ 5422 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 5423 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) 5424 return (0); 5425 } 5426 5427 return (1); 5428 } 5429 5430 /* 5431 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 5432 * consists of input pattern strings and an ops-vector to evaluate them. 5433 * This function returns >0 for match, 0 for no match, and <0 for error. 
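 * (In practice, an error can arise only from pattern evaluation:
 * dtrace_match_glob() returns -1 if '*' matching recurses beyond
 * DTRACE_PROBEKEY_MAXDEPTH.)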
5434 */ 5435 static int 5436 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 5437 uint32_t priv, uid_t uid) 5438 { 5439 dtrace_provider_t *pvp = prp->dtpr_provider; 5440 int rv; 5441 5442 if (pvp->dtpv_defunct) 5443 return (0); 5444 5445 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 5446 return (rv); 5447 5448 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 5449 return (rv); 5450 5451 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 5452 return (rv); 5453 5454 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 5455 return (rv); 5456 5457 if (dtrace_match_priv(prp, priv, uid) == 0) 5458 return (0); 5459 5460 return (rv); 5461 } 5462 5463 /* 5464 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 5465 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 5466 * libc's version, the kernel version only applies to 8-bit ASCII strings. 5467 * In addition, all of the recursion cases except for '*' matching have been 5468 * unwound. For '*', we still implement recursive evaluation, but a depth 5469 * counter is maintained and matching is aborted if we recurse too deep. 5470 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 5471 */ 5472 static int 5473 dtrace_match_glob(const char *s, const char *p, int depth) 5474 { 5475 const char *olds; 5476 char s1, c; 5477 int gs; 5478 5479 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 5480 return (-1); 5481 5482 if (s == NULL) 5483 s = ""; /* treat NULL as empty string */ 5484 5485 top: 5486 olds = s; 5487 s1 = *s++; 5488 5489 if (p == NULL) 5490 return (0); 5491 5492 if ((c = *p++) == '\0') 5493 return (s1 == '\0'); 5494 5495 switch (c) { 5496 case '[': { 5497 int ok = 0, notflag = 0; 5498 char lc = '\0'; 5499 5500 if (s1 == '\0') 5501 return (0); 5502 5503 if (*p == '!') { 5504 notflag = 1; 5505 p++; 5506 } 5507 5508 if ((c = *p++) == '\0') 5509 return (0); 5510 5511 do { 5512 if (c == '-' && lc != '\0' && *p != ']') { 5513 if ((c = *p++) == '\0') 5514 return (0); 5515 if (c == '\\' && (c = *p++) == '\0') 5516 return (0); 5517 5518 if (notflag) { 5519 if (s1 < lc || s1 > c) 5520 ok++; 5521 else 5522 return (0); 5523 } else if (lc <= s1 && s1 <= c) 5524 ok++; 5525 5526 } else if (c == '\\' && (c = *p++) == '\0') 5527 return (0); 5528 5529 lc = c; /* save left-hand 'c' for next iteration */ 5530 5531 if (notflag) { 5532 if (s1 != c) 5533 ok++; 5534 else 5535 return (0); 5536 } else if (s1 == c) 5537 ok++; 5538 5539 if ((c = *p++) == '\0') 5540 return (0); 5541 5542 } while (c != ']'); 5543 5544 if (ok) 5545 goto top; 5546 5547 return (0); 5548 } 5549 5550 case '\\': 5551 if ((c = *p++) == '\0') 5552 return (0); 5553 /*FALLTHRU*/ 5554 5555 default: 5556 if (c != s1) 5557 return (0); 5558 /*FALLTHRU*/ 5559 5560 case '?': 5561 if (s1 != '\0') 5562 goto top; 5563 return (0); 5564 5565 case '*': 5566 while (*p == '*') 5567 p++; /* consecutive *'s are identical to a single one */ 5568 5569 if (*p == '\0') 5570 return (1); 5571 5572 for (s = olds; *s != '\0'; s++) { 5573 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 5574 return (gs); 5575 } 5576 5577 return (0); 5578 } 5579 } 5580 5581 /*ARGSUSED*/ 5582 static int 5583 dtrace_match_string(const char *s, const char *p, int depth) 5584 { 5585 return (s != NULL && strcmp(s, p) == 0); 5586 } 5587 5588 /*ARGSUSED*/ 5589 static int 5590 dtrace_match_nul(const char *s, const char *p, int depth) 5591 { 5592 return (1); /* always match the empty 
pattern */ 5593 } 5594 5595 /*ARGSUSED*/ 5596 static int 5597 dtrace_match_nonzero(const char *s, const char *p, int depth) 5598 { 5599 return (s != NULL && s[0] != '\0'); 5600 } 5601 5602 static int 5603 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 5604 int (*matched)(dtrace_probe_t *, void *), void *arg) 5605 { 5606 dtrace_probe_t template, *probe; 5607 dtrace_hash_t *hash = NULL; 5608 int len, best = INT_MAX, nmatched = 0; 5609 dtrace_id_t i; 5610 5611 ASSERT(MUTEX_HELD(&dtrace_lock)); 5612 5613 /* 5614 * If the probe ID is specified in the key, just look up by ID and 5615 * invoke the match callback once if a matching probe is found. 5616 */ 5617 if (pkp->dtpk_id != DTRACE_IDNONE) { 5618 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 5619 dtrace_match_probe(probe, pkp, priv, uid) > 0) { 5620 (void) (*matched)(probe, arg); 5621 nmatched++; 5622 } 5623 return (nmatched); 5624 } 5625 5626 template.dtpr_mod = (char *)pkp->dtpk_mod; 5627 template.dtpr_func = (char *)pkp->dtpk_func; 5628 template.dtpr_name = (char *)pkp->dtpk_name; 5629 5630 /* 5631 * We want to find the most distinct of the module name, function 5632 * name, and name. So for each one that is not a glob pattern or 5633 * empty string, we perform a lookup in the corresponding hash and 5634 * use the hash table with the fewest collisions to do our search. 5635 */ 5636 if (pkp->dtpk_mmatch == &dtrace_match_string && 5637 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 5638 best = len; 5639 hash = dtrace_bymod; 5640 } 5641 5642 if (pkp->dtpk_fmatch == &dtrace_match_string && 5643 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 5644 best = len; 5645 hash = dtrace_byfunc; 5646 } 5647 5648 if (pkp->dtpk_nmatch == &dtrace_match_string && 5649 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 5650 best = len; 5651 hash = dtrace_byname; 5652 } 5653 5654 /* 5655 * If we did not select a hash table, iterate over every probe and 5656 * invoke our callback for each one that matches our input probe key. 5657 */ 5658 if (hash == NULL) { 5659 for (i = 0; i < dtrace_nprobes; i++) { 5660 if ((probe = dtrace_probes[i]) == NULL || 5661 dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5662 continue; 5663 5664 nmatched++; 5665 5666 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5667 break; 5668 } 5669 5670 return (nmatched); 5671 } 5672 5673 /* 5674 * If we selected a hash table, iterate over each probe of the same key 5675 * name and invoke the callback for every probe that matches the other 5676 * attributes of our input probe key. 5677 */ 5678 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 5679 probe = *(DTRACE_HASHNEXT(hash, probe))) { 5680 5681 if (dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5682 continue; 5683 5684 nmatched++; 5685 5686 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5687 break; 5688 } 5689 5690 return (nmatched); 5691 } 5692 5693 /* 5694 * Return the function pointer dtrace_match_probe() should use to compare the 5695 * specified pattern with a string. For NULL or empty patterns, we select 5696 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 5697 * For non-empty non-glob strings, we use dtrace_match_string(). 5698 */ 5699 static dtrace_probekey_f * 5700 dtrace_probekey_func(const char *p) 5701 { 5702 char c; 5703 5704 if (p == NULL || *p == '\0') 5705 return (&dtrace_match_nul); 5706 5707 while ((c = *p++) != '\0') { 5708 if (c == '[' || c == '?'
|| c == '*' || c == '\\') 5709 return (&dtrace_match_glob); 5710 } 5711 5712 return (&dtrace_match_string); 5713 } 5714 5715 /* 5716 * Build a probe comparison key for use with dtrace_match_probe() from the 5717 * given probe description. By convention, a null key only matches anchored 5718 * probes: if each field is the empty string, reset dtpk_fmatch to 5719 * dtrace_match_nonzero(). 5720 */ 5721 static void 5722 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 5723 { 5724 pkp->dtpk_prov = pdp->dtpd_provider; 5725 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 5726 5727 pkp->dtpk_mod = pdp->dtpd_mod; 5728 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 5729 5730 pkp->dtpk_func = pdp->dtpd_func; 5731 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 5732 5733 pkp->dtpk_name = pdp->dtpd_name; 5734 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 5735 5736 pkp->dtpk_id = pdp->dtpd_id; 5737 5738 if (pkp->dtpk_id == DTRACE_IDNONE && 5739 pkp->dtpk_pmatch == &dtrace_match_nul && 5740 pkp->dtpk_mmatch == &dtrace_match_nul && 5741 pkp->dtpk_fmatch == &dtrace_match_nul && 5742 pkp->dtpk_nmatch == &dtrace_match_nul) 5743 pkp->dtpk_fmatch = &dtrace_match_nonzero; 5744 } 5745 5746 /* 5747 * DTrace Provider-to-Framework API Functions 5748 * 5749 * These functions implement much of the Provider-to-Framework API, as 5750 * described in <sys/dtrace.h>. The parts of the API not in this section are 5751 * the functions in the API for probe management (found below), and 5752 * dtrace_probe() itself (found above). 5753 */ 5754 5755 /* 5756 * Register the calling provider with the DTrace framework. This should 5757 * generally be called by DTrace providers in their attach(9E) entry point. 5758 */ 5759 int 5760 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 5761 uid_t uid, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 5762 { 5763 dtrace_provider_t *provider; 5764 5765 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 5766 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5767 "arguments", name ? 
name : "<NULL>"); 5768 return (EINVAL); 5769 } 5770 5771 if (name[0] == '\0' || dtrace_badname(name)) { 5772 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5773 "provider name", name); 5774 return (EINVAL); 5775 } 5776 5777 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 5778 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 5779 pops->dtps_destroy == NULL || 5780 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 5781 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5782 "provider ops", name); 5783 return (EINVAL); 5784 } 5785 5786 if (dtrace_badattr(&pap->dtpa_provider) || 5787 dtrace_badattr(&pap->dtpa_mod) || 5788 dtrace_badattr(&pap->dtpa_func) || 5789 dtrace_badattr(&pap->dtpa_name) || 5790 dtrace_badattr(&pap->dtpa_args)) { 5791 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5792 "provider attributes", name); 5793 return (EINVAL); 5794 } 5795 5796 if (priv & ~DTRACE_PRIV_ALL) { 5797 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5798 "privilege attributes", name); 5799 return (EINVAL); 5800 } 5801 5802 if ((priv & DTRACE_PRIV_KERNEL) && 5803 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 5804 pops->dtps_usermode == NULL) { 5805 cmn_err(CE_WARN, "failed to register provider '%s': need " 5806 "dtps_usermode() op for given privilege attributes", name); 5807 return (EINVAL); 5808 } 5809 5810 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 5811 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 5812 (void) strcpy(provider->dtpv_name, name); 5813 5814 provider->dtpv_attr = *pap; 5815 provider->dtpv_priv.dtpp_flags = priv; 5816 provider->dtpv_priv.dtpp_uid = uid; 5817 provider->dtpv_pops = *pops; 5818 5819 if (pops->dtps_provide == NULL) { 5820 ASSERT(pops->dtps_provide_module != NULL); 5821 provider->dtpv_pops.dtps_provide = 5822 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 5823 } 5824 5825 if (pops->dtps_provide_module == NULL) { 5826 ASSERT(pops->dtps_provide != NULL); 5827 provider->dtpv_pops.dtps_provide_module = 5828 (void (*)(void *, struct modctl *))dtrace_nullop; 5829 } 5830 5831 if (pops->dtps_suspend == NULL) { 5832 ASSERT(pops->dtps_resume == NULL); 5833 provider->dtpv_pops.dtps_suspend = 5834 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5835 provider->dtpv_pops.dtps_resume = 5836 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5837 } 5838 5839 provider->dtpv_arg = arg; 5840 *idp = (dtrace_provider_id_t)provider; 5841 5842 if (pops == &dtrace_provider_ops) { 5843 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5844 ASSERT(MUTEX_HELD(&dtrace_lock)); 5845 ASSERT(dtrace_anon.dta_enabling == NULL); 5846 5847 /* 5848 * We make sure that the DTrace provider is at the head of 5849 * the provider chain. 5850 */ 5851 provider->dtpv_next = dtrace_provider; 5852 dtrace_provider = provider; 5853 return (0); 5854 } 5855 5856 mutex_enter(&dtrace_provider_lock); 5857 mutex_enter(&dtrace_lock); 5858 5859 /* 5860 * If there is at least one provider registered, we'll add this 5861 * provider after the first provider. 5862 */ 5863 if (dtrace_provider != NULL) { 5864 provider->dtpv_next = dtrace_provider->dtpv_next; 5865 dtrace_provider->dtpv_next = provider; 5866 } else { 5867 dtrace_provider = provider; 5868 } 5869 5870 if (dtrace_retained != NULL) { 5871 dtrace_enabling_provide(provider); 5872 5873 /* 5874 * Now we need to call dtrace_enabling_matchall() -- which 5875 * will acquire cpu_lock and dtrace_lock. 
We therefore need 5876 * to drop all of our locks before calling into it... 5877 */ 5878 mutex_exit(&dtrace_lock); 5879 mutex_exit(&dtrace_provider_lock); 5880 dtrace_enabling_matchall(); 5881 5882 return (0); 5883 } 5884 5885 mutex_exit(&dtrace_lock); 5886 mutex_exit(&dtrace_provider_lock); 5887 5888 return (0); 5889 } 5890 5891 /* 5892 * Unregister the specified provider from the DTrace framework. This should 5893 * generally be called by DTrace providers in their detach(9E) entry point. 5894 */ 5895 int 5896 dtrace_unregister(dtrace_provider_id_t id) 5897 { 5898 dtrace_provider_t *old = (dtrace_provider_t *)id; 5899 dtrace_provider_t *prev = NULL; 5900 int i, self = 0; 5901 dtrace_probe_t *probe, *first = NULL; 5902 5903 if (old->dtpv_pops.dtps_enable == 5904 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 5905 /* 5906 * If DTrace itself is the provider, we're called with locks 5907 * already held. 5908 */ 5909 ASSERT(old == dtrace_provider); 5910 ASSERT(dtrace_devi != NULL); 5911 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5912 ASSERT(MUTEX_HELD(&dtrace_lock)); 5913 self = 1; 5914 5915 if (dtrace_provider->dtpv_next != NULL) { 5916 /* 5917 * There's another provider here; return failure. 5918 */ 5919 return (EBUSY); 5920 } 5921 } else { 5922 mutex_enter(&dtrace_provider_lock); 5923 mutex_enter(&mod_lock); 5924 mutex_enter(&dtrace_lock); 5925 } 5926 5927 /* 5928 * If anyone has /dev/dtrace open, or if there are anonymous enabled 5929 * probes, we refuse to let providers slither away, unless this 5930 * provider has already been explicitly invalidated. 5931 */ 5932 if (!old->dtpv_defunct && 5933 (dtrace_opens || (dtrace_anon.dta_state != NULL && 5934 dtrace_anon.dta_state->dts_necbs > 0))) { 5935 if (!self) { 5936 mutex_exit(&dtrace_lock); 5937 mutex_exit(&mod_lock); 5938 mutex_exit(&dtrace_provider_lock); 5939 } 5940 return (EBUSY); 5941 } 5942 5943 /* 5944 * Attempt to destroy the probes associated with this provider. 5945 */ 5946 for (i = 0; i < dtrace_nprobes; i++) { 5947 if ((probe = dtrace_probes[i]) == NULL) 5948 continue; 5949 5950 if (probe->dtpr_provider != old) 5951 continue; 5952 5953 if (probe->dtpr_ecb == NULL) 5954 continue; 5955 5956 /* 5957 * We have at least one ECB; we can't remove this provider. 5958 */ 5959 if (!self) { 5960 mutex_exit(&dtrace_lock); 5961 mutex_exit(&mod_lock); 5962 mutex_exit(&dtrace_provider_lock); 5963 } 5964 return (EBUSY); 5965 } 5966 5967 /* 5968 * All of the probes for this provider are disabled; we can safely 5969 * remove all of them from their hash chains and from the probe array. 5970 */ 5971 for (i = 0; i < dtrace_nprobes; i++) { 5972 if ((probe = dtrace_probes[i]) == NULL) 5973 continue; 5974 5975 if (probe->dtpr_provider != old) 5976 continue; 5977 5978 dtrace_probes[i] = NULL; 5979 5980 dtrace_hash_remove(dtrace_bymod, probe); 5981 dtrace_hash_remove(dtrace_byfunc, probe); 5982 dtrace_hash_remove(dtrace_byname, probe); 5983 5984 if (first == NULL) { 5985 first = probe; 5986 probe->dtpr_nextmod = NULL; 5987 } else { 5988 probe->dtpr_nextmod = first; 5989 first = probe; 5990 } 5991 } 5992 5993 /* 5994 * The provider's probes have been removed from the hash chains and 5995 * from the probe array. Now issue a dtrace_sync() to be sure that 5996 * everyone has cleared out from any probe array processing. 
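 * (dtrace_sync() does not return until every CPU has been observed
 * outside of probe context; because probe processing runs with
 * interrupts disabled, this guarantees that no CPU still holds a
 * reference to one of the removed probes.)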
5997 */ 5998 dtrace_sync(); 5999 6000 for (probe = first; probe != NULL; probe = first) { 6001 first = probe->dtpr_nextmod; 6002 6003 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6004 probe->dtpr_arg); 6005 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6006 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6007 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6008 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6009 kmem_free(probe, sizeof (dtrace_probe_t)); 6010 } 6011 6012 if ((prev = dtrace_provider) == old) { 6013 ASSERT(self || dtrace_devi == NULL); 6014 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6015 dtrace_provider = old->dtpv_next; 6016 } else { 6017 while (prev != NULL && prev->dtpv_next != old) 6018 prev = prev->dtpv_next; 6019 6020 if (prev == NULL) { 6021 panic("attempt to unregister non-existent " 6022 "dtrace provider %p\n", (void *)id); 6023 } 6024 6025 prev->dtpv_next = old->dtpv_next; 6026 } 6027 6028 if (!self) { 6029 mutex_exit(&dtrace_lock); 6030 mutex_exit(&mod_lock); 6031 mutex_exit(&dtrace_provider_lock); 6032 } 6033 6034 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6035 kmem_free(old, sizeof (dtrace_provider_t)); 6036 6037 return (0); 6038 } 6039 6040 /* 6041 * Invalidate the specified provider. All subsequent probe lookups for the 6042 * specified provider will fail, but its probes will not be removed. 6043 */ 6044 void 6045 dtrace_invalidate(dtrace_provider_id_t id) 6046 { 6047 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6048 6049 ASSERT(pvp->dtpv_pops.dtps_enable != 6050 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6051 6052 mutex_enter(&dtrace_provider_lock); 6053 mutex_enter(&dtrace_lock); 6054 6055 pvp->dtpv_defunct = 1; 6056 6057 mutex_exit(&dtrace_lock); 6058 mutex_exit(&dtrace_provider_lock); 6059 } 6060 6061 /* 6062 * Indicate whether or not DTrace has attached. 6063 */ 6064 int 6065 dtrace_attached(void) 6066 { 6067 /* 6068 * dtrace_provider will be non-NULL iff the DTrace driver has 6069 * attached. (It's non-NULL because DTrace is always itself a 6070 * provider.) 6071 */ 6072 return (dtrace_provider != NULL); 6073 } 6074 6075 /* 6076 * Remove all the unenabled probes for the given provider. This function is 6077 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6078 * -- just as many of its associated probes as it can. 6079 */ 6080 int 6081 dtrace_condense(dtrace_provider_id_t id) 6082 { 6083 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6084 int i; 6085 dtrace_probe_t *probe; 6086 6087 /* 6088 * Make sure this isn't the dtrace provider itself. 6089 */ 6090 ASSERT(prov->dtpv_pops.dtps_enable != 6091 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6092 6093 mutex_enter(&dtrace_provider_lock); 6094 mutex_enter(&dtrace_lock); 6095 6096 /* 6097 * Attempt to destroy the probes associated with this provider. 
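 * Unlike the unregister path, we need not fail if some of the
 * provider's probes are enabled: any probe with an ECB is simply
 * skipped, and only the unenabled probes are destroyed and their
 * identifiers returned to the arena.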
6098 */
6099 for (i = 0; i < dtrace_nprobes; i++) {
6100 if ((probe = dtrace_probes[i]) == NULL)
6101 continue;
6102
6103 if (probe->dtpr_provider != prov)
6104 continue;
6105
6106 if (probe->dtpr_ecb != NULL)
6107 continue;
6108
6109 dtrace_probes[i] = NULL;
6110
6111 dtrace_hash_remove(dtrace_bymod, probe);
6112 dtrace_hash_remove(dtrace_byfunc, probe);
6113 dtrace_hash_remove(dtrace_byname, probe);
6114
6115 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
6116 probe->dtpr_arg);
6117 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
6118 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
6119 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
6120 kmem_free(probe, sizeof (dtrace_probe_t));
6121 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
6122 }
6123
6124 mutex_exit(&dtrace_lock);
6125 mutex_exit(&dtrace_provider_lock);
6126
6127 return (0);
6128 }
6129
6130 /*
6131 * DTrace Probe Management Functions
6132 *
6133 * The functions in this section perform the DTrace probe management,
6134 * including functions to create probes, look up probes, and call into the
6135 * providers to request that probes be provided. Some of these functions are
6136 * in the Provider-to-Framework API; these functions can be identified by the
6137 * fact that they are not declared "static".
6138 */
6139
6140 /*
6141 * Create a probe with the specified module name, function name, and name.
6142 */
6143 dtrace_id_t
6144 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
6145 const char *func, const char *name, int aframes, void *arg)
6146 {
6147 dtrace_probe_t *probe, **probes;
6148 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
6149 dtrace_id_t id;
6150
6151 if (provider == dtrace_provider) {
6152 ASSERT(MUTEX_HELD(&dtrace_lock));
6153 } else {
6154 mutex_enter(&dtrace_lock);
6155 }
6156
6157 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
6158 VM_BESTFIT | VM_SLEEP);
6159 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
6160
6161 probe->dtpr_id = id;
6162 probe->dtpr_gen = dtrace_probegen++;
6163 probe->dtpr_mod = dtrace_strdup(mod);
6164 probe->dtpr_func = dtrace_strdup(func);
6165 probe->dtpr_name = dtrace_strdup(name);
6166 probe->dtpr_arg = arg;
6167 probe->dtpr_aframes = aframes;
6168 probe->dtpr_provider = provider;
6169
6170 dtrace_hash_add(dtrace_bymod, probe);
6171 dtrace_hash_add(dtrace_byfunc, probe);
6172 dtrace_hash_add(dtrace_byname, probe);
6173
6174 if (id - 1 >= dtrace_nprobes) {
6175 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
6176 size_t nsize = osize << 1;
6177
6178 if (nsize == 0) {
6179 ASSERT(osize == 0);
6180 ASSERT(dtrace_probes == NULL);
6181 nsize = sizeof (dtrace_probe_t *);
6182 }
6183
6184 probes = kmem_zalloc(nsize, KM_SLEEP);
6185
6186 if (dtrace_probes == NULL) {
6187 ASSERT(osize == 0);
6188 dtrace_probes = probes;
6189 dtrace_nprobes = 1;
6190 } else {
6191 dtrace_probe_t **oprobes = dtrace_probes;
6192
6193 bcopy(oprobes, probes, osize);
6194 dtrace_membar_producer();
6195 dtrace_probes = probes;
6196
6197 dtrace_sync();
6198
6199 /*
6200 * All CPUs are now seeing the new probes array; we can
6201 * safely free the old array.
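 *
 * This is the usual publish-then-reclaim sequence:
 *
 *	bcopy(oprobes, probes, osize);	<- fill the new array
 *	dtrace_membar_producer();	<- order the fill before the store
 *	dtrace_probes = probes;		<- publish
 *	dtrace_sync();			<- drain readers of the old array
 *	kmem_free(oprobes, osize);	<- reclaim, below
 *
 * Probe context cannot take dtrace_lock, so ordering plus a sync --
 * not a lock -- is what makes this transition safe.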
6202 */
6203 kmem_free(oprobes, osize);
6204 dtrace_nprobes <<= 1;
6205 }
6206
6207 ASSERT(id - 1 < dtrace_nprobes);
6208 }
6209
6210 ASSERT(dtrace_probes[id - 1] == NULL);
6211 dtrace_probes[id - 1] = probe;
6212
6213 if (provider != dtrace_provider)
6214 mutex_exit(&dtrace_lock);
6215
6216 return (id);
6217 }
6218
6219 static dtrace_probe_t *
6220 dtrace_probe_lookup_id(dtrace_id_t id)
6221 {
6222 ASSERT(MUTEX_HELD(&dtrace_lock));
6223
6224 if (id == 0 || id > dtrace_nprobes)
6225 return (NULL);
6226
6227 return (dtrace_probes[id - 1]);
6228 }
6229
6230 static int
6231 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
6232 {
6233 *((dtrace_id_t *)arg) = probe->dtpr_id;
6234
6235 return (DTRACE_MATCH_DONE);
6236 }
6237
6238 /*
6239 * Look up a probe based on provider and one or more of module name, function
6240 * name and probe name.
6241 */
6242 dtrace_id_t
6243 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
6244 const char *func, const char *name)
6245 {
6246 dtrace_probekey_t pkey;
6247 dtrace_id_t id;
6248 int match;
6249
6250 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
6251 pkey.dtpk_pmatch = &dtrace_match_string;
6252 pkey.dtpk_mod = mod;
6253 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
6254 pkey.dtpk_func = func;
6255 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
6256 pkey.dtpk_name = name;
6257 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
6258 pkey.dtpk_id = DTRACE_IDNONE;
6259
6260 mutex_enter(&dtrace_lock);
6261 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0,
6262 dtrace_probe_lookup_match, &id);
6263 mutex_exit(&dtrace_lock);
6264
6265 ASSERT(match == 1 || match == 0);
6266 return (match ? id : 0);
6267 }
6268
6269 /*
6270 * Returns the probe argument associated with the specified probe.
6271 */
6272 void *
6273 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
6274 {
6275 dtrace_probe_t *probe;
6276 void *rval = NULL;
6277
6278 mutex_enter(&dtrace_lock);
6279
6280 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
6281 probe->dtpr_provider == (dtrace_provider_t *)id)
6282 rval = probe->dtpr_arg;
6283
6284 mutex_exit(&dtrace_lock);
6285
6286 return (rval);
6287 }
6288
6289 /*
6290 * Copy a probe into a probe description.
6291 */
6292 static void
6293 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
6294 {
6295 bzero(pdp, sizeof (dtrace_probedesc_t));
6296 pdp->dtpd_id = prp->dtpr_id;
6297
6298 (void) strncpy(pdp->dtpd_provider,
6299 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
6300
6301 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
6302 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
6303 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
6304 }
6305
6306 /*
6307 * Called to indicate that a probe -- or probes -- should be provided by a
6308 * specified provider. If the specified description is NULL, the provider will
6309 * be told to provide all of its probes. (This is done whenever a new
6310 * consumer comes along, or whenever a retained enabling is to be matched.) If
6311 * the specified description is non-NULL, the provider is given the
6312 * opportunity to dynamically provide the specified probe, allowing providers
6313 * to support the creation of probes on-the-fly. (So-called _autocreated_
6314 * probes.)
If the provider is NULL, the operations will be applied to all 6315 * providers; if the provider is non-NULL the operations will only be applied 6316 * to the specified provider. The dtrace_provider_lock must be held, and the 6317 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6318 * will need to grab the dtrace_lock when it reenters the framework through 6319 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6320 */ 6321 static void 6322 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 6323 { 6324 struct modctl *ctl; 6325 int all = 0; 6326 6327 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6328 6329 if (prv == NULL) { 6330 all = 1; 6331 prv = dtrace_provider; 6332 } 6333 6334 do { 6335 /* 6336 * First, call the blanket provide operation. 6337 */ 6338 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 6339 6340 /* 6341 * Now call the per-module provide operation. We will grab 6342 * mod_lock to prevent the list from being modified. Note 6343 * that this also prevents the mod_busy bits from changing. 6344 * (mod_busy can only be changed with mod_lock held.) 6345 */ 6346 mutex_enter(&mod_lock); 6347 6348 ctl = &modules; 6349 do { 6350 if (ctl->mod_busy || ctl->mod_mp == NULL) 6351 continue; 6352 6353 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 6354 6355 } while ((ctl = ctl->mod_next) != &modules); 6356 6357 mutex_exit(&mod_lock); 6358 } while (all && (prv = prv->dtpv_next) != NULL); 6359 } 6360 6361 /* 6362 * Iterate over each probe, and call the Framework-to-Provider API function 6363 * denoted by offs. 6364 */ 6365 static void 6366 dtrace_probe_foreach(uintptr_t offs) 6367 { 6368 dtrace_provider_t *prov; 6369 void (*func)(void *, dtrace_id_t, void *); 6370 dtrace_probe_t *probe; 6371 dtrace_icookie_t cookie; 6372 int i; 6373 6374 /* 6375 * We disable interrupts to walk through the probe array. This is 6376 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 6377 * won't see stale data. 6378 */ 6379 cookie = dtrace_interrupt_disable(); 6380 6381 for (i = 0; i < dtrace_nprobes; i++) { 6382 if ((probe = dtrace_probes[i]) == NULL) 6383 continue; 6384 6385 if (probe->dtpr_ecb == NULL) { 6386 /* 6387 * This probe isn't enabled -- don't call the function. 6388 */ 6389 continue; 6390 } 6391 6392 prov = probe->dtpr_provider; 6393 func = *((void(**)(void *, dtrace_id_t, void *)) 6394 ((uintptr_t)&prov->dtpv_pops + offs)); 6395 6396 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 6397 } 6398 6399 dtrace_interrupt_enable(cookie); 6400 } 6401 6402 static int 6403 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 6404 { 6405 dtrace_probekey_t pkey; 6406 uint32_t priv; 6407 uid_t uid; 6408 6409 ASSERT(MUTEX_HELD(&dtrace_lock)); 6410 dtrace_ecb_create_cache = NULL; 6411 6412 if (desc == NULL) { 6413 /* 6414 * If we're passed a NULL description, we're being asked to 6415 * create an ECB with a NULL probe. 
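 * (The ECB is created below regardless; dtrace_ecb_enable() later
 * recognizes the probe-less case and treats it as a no-op -- see the
 * NULL-probe check in that function.)
 *
 * The probes matched on the non-NULL path were created earlier via
 * the provide path above.  As a hedged sketch of that side of the
 * interface -- "foo_id", "foo_provide" and the probe names are all
 * hypothetical, not part of this file -- a provider's dtps_provide()
 * entry point typically looks a probe up before creating it:
 *
 *	static void
 *	foo_provide(void *arg, const dtrace_probedesc_t *desc)
 *	{
 *		if (dtrace_probe_lookup(foo_id, NULL, "foo", "entry") != 0)
 *			return;
 *		(void) dtrace_probe_create(foo_id, NULL, "foo",
 *		    "entry", 0, NULL);
 *	}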
6416 */ 6417 (void) dtrace_ecb_create_enable(NULL, enab); 6418 return (0); 6419 } 6420 6421 dtrace_probekey(desc, &pkey); 6422 dtrace_cred2priv(CRED(), &priv, &uid); 6423 6424 return (dtrace_match(&pkey, priv, uid, dtrace_ecb_create_enable, enab)); 6425 } 6426 6427 /* 6428 * DTrace Helper Provider Functions 6429 */ 6430 static void 6431 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 6432 { 6433 attr->dtat_name = DOF_ATTR_NAME(dofattr); 6434 attr->dtat_data = DOF_ATTR_DATA(dofattr); 6435 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 6436 } 6437 6438 static void 6439 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 6440 const dof_provider_t *dofprov, char *strtab) 6441 { 6442 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 6443 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 6444 dofprov->dofpv_provattr); 6445 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 6446 dofprov->dofpv_modattr); 6447 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 6448 dofprov->dofpv_funcattr); 6449 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 6450 dofprov->dofpv_nameattr); 6451 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 6452 dofprov->dofpv_argsattr); 6453 } 6454 6455 static void 6456 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6457 { 6458 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6459 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6460 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 6461 dof_provider_t *provider; 6462 dof_probe_t *probe; 6463 uint32_t *off; 6464 uint8_t *arg; 6465 char *strtab; 6466 uint_t i, nprobes; 6467 dtrace_helper_provdesc_t dhpv; 6468 dtrace_helper_probedesc_t dhpb; 6469 dtrace_meta_t *meta = dtrace_meta_pid; 6470 dtrace_mops_t *mops = &meta->dtm_mops; 6471 void *parg; 6472 6473 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6474 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6475 provider->dofpv_strtab * dof->dofh_secsize); 6476 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6477 provider->dofpv_probes * dof->dofh_secsize); 6478 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6479 provider->dofpv_prargs * dof->dofh_secsize); 6480 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6481 provider->dofpv_proffs * dof->dofh_secsize); 6482 6483 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6484 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 6485 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 6486 6487 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 6488 6489 /* 6490 * Create the provider. 6491 */ 6492 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6493 6494 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 6495 return; 6496 6497 meta->dtm_count++; 6498 6499 /* 6500 * Create the probes. 
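 *
 * Each dof_probe_t below is repackaged as a dtrace_helper_probedesc_t:
 * the function and probe names are offsets into the provider's string
 * table, dthpb_base and dthpb_offs locate the probe sites, and the
 * argument index and type fields come from the arg and string
 * sections.  The meta-provider then materializes each description
 * through its dtms_create_probe() entry point.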
6501 */ 6502 for (i = 0; i < nprobes; i++) { 6503 probe = (dof_probe_t *)(uintptr_t)(daddr + 6504 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 6505 6506 dhpb.dthpb_mod = dhp->dofhp_mod; 6507 dhpb.dthpb_func = strtab + probe->dofpr_func; 6508 dhpb.dthpb_name = strtab + probe->dofpr_name; 6509 dhpb.dthpb_base = probe->dofpr_addr; 6510 dhpb.dthpb_offs = off + probe->dofpr_offidx; 6511 dhpb.dthpb_noffs = probe->dofpr_noffs; 6512 dhpb.dthpb_args = arg + probe->dofpr_argidx; 6513 dhpb.dthpb_nargc = probe->dofpr_nargc; 6514 dhpb.dthpb_xargc = probe->dofpr_xargc; 6515 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 6516 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 6517 6518 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 6519 } 6520 } 6521 6522 static void 6523 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 6524 { 6525 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6526 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6527 int i; 6528 6529 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6530 6531 for (i = 0; i < dof->dofh_secnum; i++) { 6532 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6533 dof->dofh_secoff + i * dof->dofh_secsize); 6534 6535 if (sec->dofs_type != DOF_SECT_PROVIDER) 6536 continue; 6537 6538 dtrace_helper_provide_one(dhp, sec, pid); 6539 } 6540 6541 /* 6542 * We may have just created probes, so we must now rematch against 6543 * any retained enablings. Note that this call will acquire both 6544 * cpu_lock and dtrace_lock; the fact that we are holding 6545 * dtrace_meta_lock now is what defines the ordering with respect to 6546 * these three locks. 6547 */ 6548 dtrace_enabling_matchall(); 6549 } 6550 6551 static void 6552 dtrace_helper_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6553 { 6554 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6555 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6556 dof_sec_t *str_sec; 6557 dof_provider_t *provider; 6558 char *strtab; 6559 dtrace_helper_provdesc_t dhpv; 6560 dtrace_meta_t *meta = dtrace_meta_pid; 6561 dtrace_mops_t *mops = &meta->dtm_mops; 6562 6563 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6564 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6565 provider->dofpv_strtab * dof->dofh_secsize); 6566 6567 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6568 6569 /* 6570 * Create the provider. 6571 */ 6572 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6573 6574 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 6575 6576 meta->dtm_count--; 6577 } 6578 6579 static void 6580 dtrace_helper_remove(dof_helper_t *dhp, pid_t pid) 6581 { 6582 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6583 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6584 int i; 6585 6586 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6587 6588 for (i = 0; i < dof->dofh_secnum; i++) { 6589 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6590 dof->dofh_secoff + i * dof->dofh_secsize); 6591 6592 if (sec->dofs_type != DOF_SECT_PROVIDER) 6593 continue; 6594 6595 dtrace_helper_remove_one(dhp, sec, pid); 6596 } 6597 } 6598 6599 /* 6600 * DTrace Meta Provider-to-Framework API Functions 6601 * 6602 * These functions implement the Meta Provider-to-Framework API, as described 6603 * in <sys/dtrace.h>. 6604 */ 6605 int 6606 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 6607 dtrace_meta_provider_id_t *idp) 6608 { 6609 dtrace_meta_t *meta; 6610 dtrace_helpers_t *help, *next; 6611 int i; 6612 6613 *idp = DTRACE_METAPROVNONE; 6614 6615 /* 6616 * We strictly don't need the name, but we hold onto it for 6617 * debuggability. 
All hail error queues!
6618 */
6619 if (name == NULL) {
6620 cmn_err(CE_WARN, "failed to register meta-provider: "
6621 "invalid name");
6622 return (EINVAL);
6623 }
6624
6625 if (mops == NULL ||
6626 mops->dtms_create_probe == NULL ||
6627 mops->dtms_provide_pid == NULL ||
6628 mops->dtms_remove_pid == NULL) {
6629 cmn_err(CE_WARN, "failed to register meta-provider %s: "
6630 "invalid ops", name);
6631 return (EINVAL);
6632 }
6633
6634 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
6635 meta->dtm_mops = *mops;
6636 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
6637 (void) strcpy(meta->dtm_name, name);
6638 meta->dtm_arg = arg;
6639
6640 mutex_enter(&dtrace_meta_lock);
6641 mutex_enter(&dtrace_lock);
6642
6643 if (dtrace_meta_pid != NULL) {
6644 mutex_exit(&dtrace_lock);
6645 mutex_exit(&dtrace_meta_lock);
6646 cmn_err(CE_WARN, "failed to register meta-provider %s: "
6647 "user-land meta-provider exists", name);
6648 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
6649 kmem_free(meta, sizeof (dtrace_meta_t));
6650 return (EINVAL);
6651 }
6652
6653 dtrace_meta_pid = meta;
6654 *idp = (dtrace_meta_provider_id_t)meta;
6655
6656 /*
6657 * If there are providers and probes ready to go, pass them
6658 * off to the new meta provider now.
6659 */
6660
6661 help = dtrace_deferred_pid;
6662 dtrace_deferred_pid = NULL;
6663
6664 mutex_exit(&dtrace_lock);
6665
6666 while (help != NULL) {
6667 for (i = 0; i < help->dthps_nprovs; i++) {
6668 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
6669 help->dthps_pid);
6670 }
6671
6672 next = help->dthps_next;
6673 help->dthps_next = NULL;
6674 help->dthps_prev = NULL;
6675 help = next;
6676 }
6677
6678 mutex_exit(&dtrace_meta_lock);
6679
6680 return (0);
6681 }
6682
6683 int
6684 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
6685 {
6686 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
6687
6688 mutex_enter(&dtrace_meta_lock);
6689 mutex_enter(&dtrace_lock);
6690
6691 if (old == dtrace_meta_pid) {
6692 pp = &dtrace_meta_pid;
6693 } else {
6694 panic("attempt to unregister non-existent "
6695 "dtrace meta-provider %p\n", (void *)old);
6696 }
6697
6698 if (old->dtm_count != 0) {
6699 mutex_exit(&dtrace_lock);
6700 mutex_exit(&dtrace_meta_lock);
6701 return (EBUSY);
6702 }
6703
6704 *pp = NULL;
6705
6706 mutex_exit(&dtrace_lock);
6707 mutex_exit(&dtrace_meta_lock);
6708
6709 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
6710 kmem_free(old, sizeof (dtrace_meta_t));
6711
6712 return (0);
6713 }
6714
6715
6716 /*
6717 * DTrace DIF Object Functions
6718 */
6719 static int
6720 dtrace_difo_err(uint_t pc, const char *format, ...)
6721 {
6722 if (dtrace_err_verbose) {
6723 va_list alist;
6724
6725 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
6726 va_start(alist, format);
6727 (void) vuprintf(format, alist);
6728 va_end(alist);
6729 }
6730
6731 #ifdef DTRACE_ERRDEBUG
6732 dtrace_errdebug(format);
6733 #endif
6734 return (1);
6735 }
6736
6737 /*
6738 * Validate a DTrace DIF object by checking the IR instructions. The following
6739 * rules are currently enforced by dtrace_difo_validate():
6740 *
6741 * 1. Each instruction must have a valid opcode
6742 * 2. Each register, string, variable, or subroutine reference must be valid
6743 * 3. No instruction can modify register %r0 (must be zero)
6744 * 4. All instruction reserved bits must be set to zero
6745 * 5. The last instruction must be a "ret" instruction
6746 * 6.
All branch targets must reference a valid instruction _after_ the branch 6747 */ 6748 static int 6749 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 6750 cred_t *cr) 6751 { 6752 int err = 0, i; 6753 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 6754 int kcheck; 6755 uint_t pc; 6756 6757 kcheck = cr == NULL || 6758 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0; 6759 6760 dp->dtdo_destructive = 0; 6761 6762 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 6763 dif_instr_t instr = dp->dtdo_buf[pc]; 6764 6765 uint_t r1 = DIF_INSTR_R1(instr); 6766 uint_t r2 = DIF_INSTR_R2(instr); 6767 uint_t rd = DIF_INSTR_RD(instr); 6768 uint_t rs = DIF_INSTR_RS(instr); 6769 uint_t label = DIF_INSTR_LABEL(instr); 6770 uint_t v = DIF_INSTR_VAR(instr); 6771 uint_t subr = DIF_INSTR_SUBR(instr); 6772 uint_t type = DIF_INSTR_TYPE(instr); 6773 uint_t op = DIF_INSTR_OP(instr); 6774 6775 switch (op) { 6776 case DIF_OP_OR: 6777 case DIF_OP_XOR: 6778 case DIF_OP_AND: 6779 case DIF_OP_SLL: 6780 case DIF_OP_SRL: 6781 case DIF_OP_SRA: 6782 case DIF_OP_SUB: 6783 case DIF_OP_ADD: 6784 case DIF_OP_MUL: 6785 case DIF_OP_SDIV: 6786 case DIF_OP_UDIV: 6787 case DIF_OP_SREM: 6788 case DIF_OP_UREM: 6789 case DIF_OP_COPYS: 6790 if (r1 >= nregs) 6791 err += efunc(pc, "invalid register %u\n", r1); 6792 if (r2 >= nregs) 6793 err += efunc(pc, "invalid register %u\n", r2); 6794 if (rd >= nregs) 6795 err += efunc(pc, "invalid register %u\n", rd); 6796 if (rd == 0) 6797 err += efunc(pc, "cannot write to %r0\n"); 6798 break; 6799 case DIF_OP_NOT: 6800 case DIF_OP_MOV: 6801 case DIF_OP_ALLOCS: 6802 if (r1 >= nregs) 6803 err += efunc(pc, "invalid register %u\n", r1); 6804 if (r2 != 0) 6805 err += efunc(pc, "non-zero reserved bits\n"); 6806 if (rd >= nregs) 6807 err += efunc(pc, "invalid register %u\n", rd); 6808 if (rd == 0) 6809 err += efunc(pc, "cannot write to %r0\n"); 6810 break; 6811 case DIF_OP_LDSB: 6812 case DIF_OP_LDSH: 6813 case DIF_OP_LDSW: 6814 case DIF_OP_LDUB: 6815 case DIF_OP_LDUH: 6816 case DIF_OP_LDUW: 6817 case DIF_OP_LDX: 6818 if (r1 >= nregs) 6819 err += efunc(pc, "invalid register %u\n", r1); 6820 if (r2 != 0) 6821 err += efunc(pc, "non-zero reserved bits\n"); 6822 if (rd >= nregs) 6823 err += efunc(pc, "invalid register %u\n", rd); 6824 if (rd == 0) 6825 err += efunc(pc, "cannot write to %r0\n"); 6826 if (kcheck) 6827 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 6828 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 6829 break; 6830 case DIF_OP_RLDSB: 6831 case DIF_OP_RLDSH: 6832 case DIF_OP_RLDSW: 6833 case DIF_OP_RLDUB: 6834 case DIF_OP_RLDUH: 6835 case DIF_OP_RLDUW: 6836 case DIF_OP_RLDX: 6837 if (r1 >= nregs) 6838 err += efunc(pc, "invalid register %u\n", r1); 6839 if (r2 != 0) 6840 err += efunc(pc, "non-zero reserved bits\n"); 6841 if (rd >= nregs) 6842 err += efunc(pc, "invalid register %u\n", rd); 6843 if (rd == 0) 6844 err += efunc(pc, "cannot write to %r0\n"); 6845 break; 6846 case DIF_OP_ULDSB: 6847 case DIF_OP_ULDSH: 6848 case DIF_OP_ULDSW: 6849 case DIF_OP_ULDUB: 6850 case DIF_OP_ULDUH: 6851 case DIF_OP_ULDUW: 6852 case DIF_OP_ULDX: 6853 if (r1 >= nregs) 6854 err += efunc(pc, "invalid register %u\n", r1); 6855 if (r2 != 0) 6856 err += efunc(pc, "non-zero reserved bits\n"); 6857 if (rd >= nregs) 6858 err += efunc(pc, "invalid register %u\n", rd); 6859 if (rd == 0) 6860 err += efunc(pc, "cannot write to %r0\n"); 6861 break; 6862 case DIF_OP_STB: 6863 case DIF_OP_STH: 6864 case DIF_OP_STW: 6865 case DIF_OP_STX: 6866 if (r1 >= nregs) 6867 err += efunc(pc, "invalid register 
%u\n", r1);
6868 if (r2 != 0)
6869 err += efunc(pc, "non-zero reserved bits\n");
6870 if (rd >= nregs)
6871 err += efunc(pc, "invalid register %u\n", rd);
6872 if (rd == 0)
6873 err += efunc(pc, "cannot write to 0 address\n");
6874 break;
6875 case DIF_OP_CMP:
6876 case DIF_OP_SCMP:
6877 if (r1 >= nregs)
6878 err += efunc(pc, "invalid register %u\n", r1);
6879 if (r2 >= nregs)
6880 err += efunc(pc, "invalid register %u\n", r2);
6881 if (rd != 0)
6882 err += efunc(pc, "non-zero reserved bits\n");
6883 break;
6884 case DIF_OP_TST:
6885 if (r1 >= nregs)
6886 err += efunc(pc, "invalid register %u\n", r1);
6887 if (r2 != 0 || rd != 0)
6888 err += efunc(pc, "non-zero reserved bits\n");
6889 break;
6890 case DIF_OP_BA:
6891 case DIF_OP_BE:
6892 case DIF_OP_BNE:
6893 case DIF_OP_BG:
6894 case DIF_OP_BGU:
6895 case DIF_OP_BGE:
6896 case DIF_OP_BGEU:
6897 case DIF_OP_BL:
6898 case DIF_OP_BLU:
6899 case DIF_OP_BLE:
6900 case DIF_OP_BLEU:
6901 if (label >= dp->dtdo_len) {
6902 err += efunc(pc, "invalid branch target %u\n",
6903 label);
6904 }
6905 if (label <= pc) {
6906 err += efunc(pc, "backward branch to %u\n",
6907 label);
6908 }
6909 break;
6910 case DIF_OP_RET:
6911 if (r1 != 0 || r2 != 0)
6912 err += efunc(pc, "non-zero reserved bits\n");
6913 if (rd >= nregs)
6914 err += efunc(pc, "invalid register %u\n", rd);
6915 break;
6916 case DIF_OP_NOP:
6917 case DIF_OP_POPTS:
6918 case DIF_OP_FLUSHTS:
6919 if (r1 != 0 || r2 != 0 || rd != 0)
6920 err += efunc(pc, "non-zero reserved bits\n");
6921 break;
6922 case DIF_OP_SETX:
6923 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
6924 err += efunc(pc, "invalid integer ref %u\n",
6925 DIF_INSTR_INTEGER(instr));
6926 }
6927 if (rd >= nregs)
6928 err += efunc(pc, "invalid register %u\n", rd);
6929 if (rd == 0)
6930 err += efunc(pc, "cannot write to %r0\n");
6931 break;
6932 case DIF_OP_SETS:
6933 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
6934 err += efunc(pc, "invalid string ref %u\n",
6935 DIF_INSTR_STRING(instr));
6936 }
6937 if (rd >= nregs)
6938 err += efunc(pc, "invalid register %u\n", rd);
6939 if (rd == 0)
6940 err += efunc(pc, "cannot write to %r0\n");
6941 break;
6942 case DIF_OP_LDGA:
6943 case DIF_OP_LDTA:
6944 if (r1 > DIF_VAR_ARRAY_MAX)
6945 err += efunc(pc, "invalid array %u\n", r1);
6946 if (r2 >= nregs)
6947 err += efunc(pc, "invalid register %u\n", r2);
6948 if (rd >= nregs)
6949 err += efunc(pc, "invalid register %u\n", rd);
6950 if (rd == 0)
6951 err += efunc(pc, "cannot write to %r0\n");
6952 break;
6953 case DIF_OP_LDGS:
6954 case DIF_OP_LDTS:
6955 case DIF_OP_LDLS:
6956 case DIF_OP_LDGAA:
6957 case DIF_OP_LDTAA:
6958 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
6959 err += efunc(pc, "invalid variable %u\n", v);
6960 if (rd >= nregs)
6961 err += efunc(pc, "invalid register %u\n", rd);
6962 if (rd == 0)
6963 err += efunc(pc, "cannot write to %r0\n");
6964 break;
6965 case DIF_OP_STGS:
6966 case DIF_OP_STTS:
6967 case DIF_OP_STLS:
6968 case DIF_OP_STGAA:
6969 case DIF_OP_STTAA:
6970 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
6971 err += efunc(pc, "invalid variable %u\n", v);
6972 if (rs >= nregs)
6973 err += efunc(pc, "invalid register %u\n", rs);
6974 break;
6975 case DIF_OP_CALL:
6976 if (subr > DIF_SUBR_MAX)
6977 err += efunc(pc, "invalid subr %u\n", subr);
6978 if (rd >= nregs)
6979 err += efunc(pc, "invalid register %u\n", rd);
6980 if (rd == 0)
6981 err += efunc(pc, "cannot write to %r0\n");
6982
6983 if (subr == DIF_SUBR_COPYOUT ||
6984 subr == DIF_SUBR_COPYOUTSTR) {
6985 dp->dtdo_destructive = 1;
6986 }
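/*
 * copyout() and copyoutstr() modify the traced process's address
 * space; flagging the DIFO as destructive here lets the framework
 * apply its usual destructive-action policy when this object is
 * later bound to an enabling.
 */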
6987 break; 6988 case DIF_OP_PUSHTR: 6989 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 6990 err += efunc(pc, "invalid ref type %u\n", type); 6991 if (r2 >= nregs) 6992 err += efunc(pc, "invalid register %u\n", r2); 6993 if (rs >= nregs) 6994 err += efunc(pc, "invalid register %u\n", rs); 6995 break; 6996 case DIF_OP_PUSHTV: 6997 if (type != DIF_TYPE_CTF) 6998 err += efunc(pc, "invalid val type %u\n", type); 6999 if (r2 >= nregs) 7000 err += efunc(pc, "invalid register %u\n", r2); 7001 if (rs >= nregs) 7002 err += efunc(pc, "invalid register %u\n", rs); 7003 break; 7004 default: 7005 err += efunc(pc, "invalid opcode %u\n", 7006 DIF_INSTR_OP(instr)); 7007 } 7008 } 7009 7010 if (dp->dtdo_len != 0 && 7011 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7012 err += efunc(dp->dtdo_len - 1, 7013 "expected 'ret' as last DIF instruction\n"); 7014 } 7015 7016 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7017 /* 7018 * If we're not returning by reference, the size must be either 7019 * 0 or the size of one of the base types. 7020 */ 7021 switch (dp->dtdo_rtype.dtdt_size) { 7022 case 0: 7023 case sizeof (uint8_t): 7024 case sizeof (uint16_t): 7025 case sizeof (uint32_t): 7026 case sizeof (uint64_t): 7027 break; 7028 7029 default: 7030 err += efunc(dp->dtdo_len - 1, "bad return size"); 7031 } 7032 } 7033 7034 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7035 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7036 dtrace_diftype_t *vt, *et; 7037 uint_t id, ndx; 7038 7039 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7040 v->dtdv_scope != DIFV_SCOPE_THREAD && 7041 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7042 err += efunc(i, "unrecognized variable scope %d\n", 7043 v->dtdv_scope); 7044 break; 7045 } 7046 7047 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7048 v->dtdv_kind != DIFV_KIND_SCALAR) { 7049 err += efunc(i, "unrecognized variable type %d\n", 7050 v->dtdv_kind); 7051 break; 7052 } 7053 7054 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7055 err += efunc(i, "%d exceeds variable id limit\n", id); 7056 break; 7057 } 7058 7059 if (id < DIF_VAR_OTHER_UBASE) 7060 continue; 7061 7062 /* 7063 * For user-defined variables, we need to check that this 7064 * definition is identical to any previous definition that we 7065 * encountered. 
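 * (Compiler-generated DIF is self-consistent, so the comparisons
 * below -- matching kind, type flags and type size -- should only
 * fail for hand-crafted or corrupt objects that present the same
 * variable id with two different definitions.)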
7066 */
7067 ndx = id - DIF_VAR_OTHER_UBASE;
7068
7069 switch (v->dtdv_scope) {
7070 case DIFV_SCOPE_GLOBAL:
7071 if (ndx < vstate->dtvs_nglobals) {
7072 dtrace_statvar_t *svar;
7073
7074 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
7075 existing = &svar->dtsv_var;
7076 }
7077
7078 break;
7079
7080 case DIFV_SCOPE_THREAD:
7081 if (ndx < vstate->dtvs_ntlocals)
7082 existing = &vstate->dtvs_tlocals[ndx];
7083 break;
7084
7085 case DIFV_SCOPE_LOCAL:
7086 if (ndx < vstate->dtvs_nlocals) {
7087 dtrace_statvar_t *svar;
7088
7089 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
7090 existing = &svar->dtsv_var;
7091 }
7092
7093 break;
7094 }
7095
7096 vt = &v->dtdv_type;
7097
7098 if (vt->dtdt_flags & DIF_TF_BYREF) {
7099 if (vt->dtdt_size == 0) {
7100 err += efunc(i, "zero-sized variable\n");
7101 break;
7102 }
7103
7104 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
7105 vt->dtdt_size > dtrace_global_maxsize) {
7106 err += efunc(i, "oversized by-ref global\n");
7107 break;
7108 }
7109 }
7110
7111 if (existing == NULL || existing->dtdv_id == 0)
7112 continue;
7113
7114 ASSERT(existing->dtdv_id == v->dtdv_id);
7115 ASSERT(existing->dtdv_scope == v->dtdv_scope);
7116
7117 if (existing->dtdv_kind != v->dtdv_kind)
7118 err += efunc(i, "%d changed variable kind\n", id);
7119
7120 et = &existing->dtdv_type;
7121
7122 if (vt->dtdt_flags != et->dtdt_flags) {
7123 err += efunc(i, "%d changed variable type flags\n", id);
7124 break;
7125 }
7126
7127 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
7128 err += efunc(i, "%d changed variable type size\n", id);
7129 break;
7130 }
7131 }
7132
7133 return (err);
7134 }
7135
7136 /*
7137 * Validate a DTrace DIF object that is to be used as a helper. Helpers
7138 * are much more constrained than normal DIFOs. Specifically, they may
7139 * not:
7140 *
7141 * 1. Make calls to subroutines other than copyin(), copyinstr(),
7142 * copyinto(), alloca() or bcopy().
7143 * 2. Access DTrace variables other than the args[] array, and the
7144 * curthread, pid, tid, execname and zonename variables.
7145 * 3. Have thread-local variables.
7146 * 4. Have dynamic variables.
7147 */
7148 static int
7149 dtrace_difo_validate_helper(dtrace_difo_t *dp)
7150 {
7151 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 7151 int err = 0; 7152 uint_t pc; 7153 7154 for (pc = 0; pc < dp->dtdo_len; pc++) { 7155 dif_instr_t instr = dp->dtdo_buf[pc]; 7156 7157 uint_t v = DIF_INSTR_VAR(instr); 7158 uint_t subr = DIF_INSTR_SUBR(instr); 7159 uint_t op = DIF_INSTR_OP(instr); 7160 7161 switch (op) { 7162 case DIF_OP_OR: 7163 case DIF_OP_XOR: 7164 case DIF_OP_AND: 7165 case DIF_OP_SLL: 7166 case DIF_OP_SRL: 7167 case DIF_OP_SRA: 7168 case DIF_OP_SUB: 7169 case DIF_OP_ADD: 7170 case DIF_OP_MUL: 7171 case DIF_OP_SDIV: 7172 case DIF_OP_UDIV: 7173 case DIF_OP_SREM: 7174 case DIF_OP_UREM: 7175 case DIF_OP_COPYS: 7176 case DIF_OP_NOT: 7177 case DIF_OP_MOV: 7178 case DIF_OP_RLDSB: 7179 case DIF_OP_RLDSH: 7180 case DIF_OP_RLDSW: 7181 case DIF_OP_RLDUB: 7182 case DIF_OP_RLDUH: 7183 case DIF_OP_RLDUW: 7184 case DIF_OP_RLDX: 7185 case DIF_OP_ULDSB: 7186 case DIF_OP_ULDSH: 7187 case DIF_OP_ULDSW: 7188 case DIF_OP_ULDUB: 7189 case DIF_OP_ULDUH: 7190 case DIF_OP_ULDUW: 7191 case DIF_OP_ULDX: 7192 case DIF_OP_STB: 7193 case DIF_OP_STH: 7194 case DIF_OP_STW: 7195 case DIF_OP_STX: 7196 case DIF_OP_ALLOCS: 7197 case DIF_OP_CMP: 7198 case DIF_OP_SCMP: 7199 case DIF_OP_TST: 7200 case DIF_OP_BA: 7201 case DIF_OP_BE: 7202 case DIF_OP_BNE: 7203 case DIF_OP_BG: 7204 case DIF_OP_BGU: 7205 case DIF_OP_BGE: 7206 case DIF_OP_BGEU: 7207 case DIF_OP_BL: 7208 case DIF_OP_BLU: 7209 case DIF_OP_BLE: 7210 case DIF_OP_BLEU: 7211 case DIF_OP_RET: 7212 case DIF_OP_NOP: 7213 case DIF_OP_POPTS: 7214 case DIF_OP_FLUSHTS: 7215 case DIF_OP_SETX: 7216 case DIF_OP_SETS: 7217 case DIF_OP_LDGA: 7218 case DIF_OP_LDLS: 7219 case DIF_OP_STGS: 7220 case DIF_OP_STLS: 7221 case DIF_OP_PUSHTR: 7222 case DIF_OP_PUSHTV: 7223 break; 7224 7225 case DIF_OP_LDGS: 7226 if (v >= DIF_VAR_OTHER_UBASE) 7227 break; 7228 7229 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7230 break; 7231 7232 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7233 v == DIF_VAR_TID || v == DIF_VAR_EXECNAME || 7234 v == DIF_VAR_ZONENAME) 7235 break; 7236 7237 err += efunc(pc, "illegal variable %u\n", v); 7238 break; 7239 7240 case DIF_OP_LDTA: 7241 case DIF_OP_LDTS: 7242 case DIF_OP_LDGAA: 7243 case DIF_OP_LDTAA: 7244 err += efunc(pc, "illegal dynamic variable load\n"); 7245 break; 7246 7247 case DIF_OP_STTS: 7248 case DIF_OP_STGAA: 7249 case DIF_OP_STTAA: 7250 err += efunc(pc, "illegal dynamic variable store\n"); 7251 break; 7252 7253 case DIF_OP_CALL: 7254 if (subr == DIF_SUBR_ALLOCA || 7255 subr == DIF_SUBR_BCOPY || 7256 subr == DIF_SUBR_COPYIN || 7257 subr == DIF_SUBR_COPYINTO || 7258 subr == DIF_SUBR_COPYINSTR) 7259 break; 7260 7261 err += efunc(pc, "invalid subr %u\n", subr); 7262 break; 7263 7264 default: 7265 err += efunc(pc, "invalid opcode %u\n", 7266 DIF_INSTR_OP(instr)); 7267 } 7268 } 7269 7270 return (err); 7271 } 7272 7273 /* 7274 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7275 * basis; 0 if not. 7276 */ 7277 static int 7278 dtrace_difo_cacheable(dtrace_difo_t *dp) 7279 { 7280 int i; 7281 7282 if (dp == NULL) 7283 return (0); 7284 7285 for (i = 0; i < dp->dtdo_varlen; i++) { 7286 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7287 7288 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 7289 continue; 7290 7291 switch (v->dtdv_id) { 7292 case DIF_VAR_CURTHREAD: 7293 case DIF_VAR_PID: 7294 case DIF_VAR_TID: 7295 case DIF_VAR_EXECNAME: 7296 case DIF_VAR_ZONENAME: 7297 break; 7298 7299 default: 7300 return (0); 7301 } 7302 } 7303 7304 /* 7305 * This DIF object may be cacheable. 
Now we need to look for any 7306 * array loading instructions, any memory loading instructions, or 7307 * any stores to thread-local variables. 7308 */ 7309 for (i = 0; i < dp->dtdo_len; i++) { 7310 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 7311 7312 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 7313 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 7314 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 7315 op == DIF_OP_LDGA || op == DIF_OP_STTS) 7316 return (0); 7317 } 7318 7319 return (1); 7320 } 7321 7322 static void 7323 dtrace_difo_hold(dtrace_difo_t *dp) 7324 { 7325 int i; 7326 7327 ASSERT(MUTEX_HELD(&dtrace_lock)); 7328 7329 dp->dtdo_refcnt++; 7330 ASSERT(dp->dtdo_refcnt != 0); 7331 7332 /* 7333 * We need to check this DIF object for references to the variable 7334 * DIF_VAR_VTIMESTAMP. 7335 */ 7336 for (i = 0; i < dp->dtdo_varlen; i++) { 7337 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7338 7339 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7340 continue; 7341 7342 if (dtrace_vtime_references++ == 0) 7343 dtrace_vtime_enable(); 7344 } 7345 } 7346 7347 /* 7348 * This routine calculates the dynamic variable chunksize for a given DIF 7349 * object. The calculation is not fool-proof, and can probably be tricked by 7350 * malicious DIF -- but it works for all compiler-generated DIF. Because this 7351 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 7352 * if a dynamic variable size exceeds the chunksize. 7353 */ 7354 static void 7355 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7356 { 7357 uint64_t sval; 7358 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 7359 const dif_instr_t *text = dp->dtdo_buf; 7360 uint_t pc, srd = 0; 7361 uint_t ttop = 0; 7362 size_t size, ksize; 7363 uint_t id, i; 7364 7365 for (pc = 0; pc < dp->dtdo_len; pc++) { 7366 dif_instr_t instr = text[pc]; 7367 uint_t op = DIF_INSTR_OP(instr); 7368 uint_t rd = DIF_INSTR_RD(instr); 7369 uint_t r1 = DIF_INSTR_R1(instr); 7370 uint_t nkeys = 0; 7371 uchar_t scope; 7372 7373 dtrace_key_t *key = tupregs; 7374 7375 switch (op) { 7376 case DIF_OP_SETX: 7377 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 7378 srd = rd; 7379 continue; 7380 7381 case DIF_OP_STTS: 7382 key = &tupregs[DIF_DTR_NREGS]; 7383 key[0].dttk_size = 0; 7384 key[1].dttk_size = 0; 7385 nkeys = 2; 7386 scope = DIFV_SCOPE_THREAD; 7387 break; 7388 7389 case DIF_OP_STGAA: 7390 case DIF_OP_STTAA: 7391 nkeys = ttop; 7392 7393 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 7394 key[nkeys++].dttk_size = 0; 7395 7396 key[nkeys++].dttk_size = 0; 7397 7398 if (op == DIF_OP_STTAA) { 7399 scope = DIFV_SCOPE_THREAD; 7400 } else { 7401 scope = DIFV_SCOPE_GLOBAL; 7402 } 7403 7404 break; 7405 7406 case DIF_OP_PUSHTR: 7407 if (ttop == DIF_DTR_NREGS) 7408 return; 7409 7410 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 7411 /* 7412 * If the register for the size of the "pushtr" 7413 * is %r0 (or the value is 0) and the type is 7414 * a string, we'll use the system-wide default 7415 * string size. 
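 * (Bailing out of this function early, as the returns below do when
 * a size cannot be determined, is safe: an underestimated chunksize
 * merely means that dtrace_dynvar() will gracefully fail the
 * allocation at probe-firing time, per the block comment above.)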
7416 */ 7417 tupregs[ttop++].dttk_size = 7418 dtrace_strsize_default; 7419 } else { 7420 if (srd == 0) 7421 return; 7422 7423 tupregs[ttop++].dttk_size = sval; 7424 } 7425 7426 break; 7427 7428 case DIF_OP_PUSHTV: 7429 if (ttop == DIF_DTR_NREGS) 7430 return; 7431 7432 tupregs[ttop++].dttk_size = 0; 7433 break; 7434 7435 case DIF_OP_FLUSHTS: 7436 ttop = 0; 7437 break; 7438 7439 case DIF_OP_POPTS: 7440 if (ttop != 0) 7441 ttop--; 7442 break; 7443 } 7444 7445 sval = 0; 7446 srd = 0; 7447 7448 if (nkeys == 0) 7449 continue; 7450 7451 /* 7452 * We have a dynamic variable allocation; calculate its size. 7453 */ 7454 for (ksize = 0, i = 0; i < nkeys; i++) 7455 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 7456 7457 size = sizeof (dtrace_dynvar_t); 7458 size += sizeof (dtrace_key_t) * (nkeys - 1); 7459 size += ksize; 7460 7461 /* 7462 * Now we need to determine the size of the stored data. 7463 */ 7464 id = DIF_INSTR_VAR(instr); 7465 7466 for (i = 0; i < dp->dtdo_varlen; i++) { 7467 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7468 7469 if (v->dtdv_id == id && v->dtdv_scope == scope) { 7470 size += v->dtdv_type.dtdt_size; 7471 break; 7472 } 7473 } 7474 7475 if (i == dp->dtdo_varlen) 7476 return; 7477 7478 /* 7479 * We have the size. If this is larger than the chunk size 7480 * for our dynamic variable state, reset the chunk size. 7481 */ 7482 size = P2ROUNDUP(size, sizeof (uint64_t)); 7483 7484 if (size > vstate->dtvs_dynvars.dtds_chunksize) 7485 vstate->dtvs_dynvars.dtds_chunksize = size; 7486 } 7487 } 7488 7489 static void 7490 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7491 { 7492 int i, oldsvars, osz, nsz, otlocals, ntlocals; 7493 uint_t id; 7494 7495 ASSERT(MUTEX_HELD(&dtrace_lock)); 7496 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 7497 7498 for (i = 0; i < dp->dtdo_varlen; i++) { 7499 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7500 dtrace_statvar_t *svar, ***svarp; 7501 size_t dsize = 0; 7502 uint8_t scope = v->dtdv_scope; 7503 int *np; 7504 7505 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7506 continue; 7507 7508 id -= DIF_VAR_OTHER_UBASE; 7509 7510 switch (scope) { 7511 case DIFV_SCOPE_THREAD: 7512 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 7513 dtrace_difv_t *tlocals; 7514 7515 if ((ntlocals = (otlocals << 1)) == 0) 7516 ntlocals = 1; 7517 7518 osz = otlocals * sizeof (dtrace_difv_t); 7519 nsz = ntlocals * sizeof (dtrace_difv_t); 7520 7521 tlocals = kmem_zalloc(nsz, KM_SLEEP); 7522 7523 if (osz != 0) { 7524 bcopy(vstate->dtvs_tlocals, 7525 tlocals, osz); 7526 kmem_free(vstate->dtvs_tlocals, osz); 7527 } 7528 7529 vstate->dtvs_tlocals = tlocals; 7530 vstate->dtvs_ntlocals = ntlocals; 7531 } 7532 7533 vstate->dtvs_tlocals[id] = *v; 7534 continue; 7535 7536 case DIFV_SCOPE_LOCAL: 7537 np = &vstate->dtvs_nlocals; 7538 svarp = &vstate->dtvs_locals; 7539 7540 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7541 dsize = NCPU * (v->dtdv_type.dtdt_size + 7542 sizeof (uint64_t)); 7543 else 7544 dsize = NCPU * sizeof (uint64_t); 7545 7546 break; 7547 7548 case DIFV_SCOPE_GLOBAL: 7549 np = &vstate->dtvs_nglobals; 7550 svarp = &vstate->dtvs_globals; 7551 7552 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7553 dsize = v->dtdv_type.dtdt_size + 7554 sizeof (uint64_t); 7555 7556 break; 7557 7558 default: 7559 ASSERT(0); 7560 } 7561 7562 while (id >= (oldsvars = *np)) { 7563 dtrace_statvar_t **statics; 7564 int newsvars, oldsize, newsize; 7565 7566 if ((newsvars = (oldsvars << 1)) == 0) 7567 newsvars = 1; 7568 7569 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 7570 
newsize = newsvars * sizeof (dtrace_statvar_t *); 7571 7572 statics = kmem_zalloc(newsize, KM_SLEEP); 7573 7574 if (oldsize != 0) { 7575 bcopy(*svarp, statics, oldsize); 7576 kmem_free(*svarp, oldsize); 7577 } 7578 7579 *svarp = statics; 7580 *np = newsvars; 7581 } 7582 7583 if ((svar = (*svarp)[id]) == NULL) { 7584 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 7585 svar->dtsv_var = *v; 7586 7587 if ((svar->dtsv_size = dsize) != 0) { 7588 svar->dtsv_data = (uint64_t)(uintptr_t) 7589 kmem_zalloc(dsize, KM_SLEEP); 7590 } 7591 7592 (*svarp)[id] = svar; 7593 } 7594 7595 svar->dtsv_refcnt++; 7596 } 7597 7598 dtrace_difo_chunksize(dp, vstate); 7599 dtrace_difo_hold(dp); 7600 } 7601 7602 static dtrace_difo_t * 7603 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7604 { 7605 dtrace_difo_t *new; 7606 size_t sz; 7607 7608 ASSERT(dp->dtdo_buf != NULL); 7609 ASSERT(dp->dtdo_refcnt != 0); 7610 7611 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 7612 7613 ASSERT(dp->dtdo_buf != NULL); 7614 sz = dp->dtdo_len * sizeof (dif_instr_t); 7615 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 7616 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 7617 new->dtdo_len = dp->dtdo_len; 7618 7619 if (dp->dtdo_strtab != NULL) { 7620 ASSERT(dp->dtdo_strlen != 0); 7621 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 7622 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 7623 new->dtdo_strlen = dp->dtdo_strlen; 7624 } 7625 7626 if (dp->dtdo_inttab != NULL) { 7627 ASSERT(dp->dtdo_intlen != 0); 7628 sz = dp->dtdo_intlen * sizeof (uint64_t); 7629 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 7630 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 7631 new->dtdo_intlen = dp->dtdo_intlen; 7632 } 7633 7634 if (dp->dtdo_vartab != NULL) { 7635 ASSERT(dp->dtdo_varlen != 0); 7636 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 7637 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 7638 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 7639 new->dtdo_varlen = dp->dtdo_varlen; 7640 } 7641 7642 dtrace_difo_init(new, vstate); 7643 return (new); 7644 } 7645 7646 static void 7647 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7648 { 7649 int i; 7650 7651 ASSERT(dp->dtdo_refcnt == 0); 7652 7653 for (i = 0; i < dp->dtdo_varlen; i++) { 7654 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7655 dtrace_statvar_t *svar, **svarp; 7656 uint_t id; 7657 uint8_t scope = v->dtdv_scope; 7658 int *np; 7659 7660 switch (scope) { 7661 case DIFV_SCOPE_THREAD: 7662 continue; 7663 7664 case DIFV_SCOPE_LOCAL: 7665 np = &vstate->dtvs_nlocals; 7666 svarp = vstate->dtvs_locals; 7667 break; 7668 7669 case DIFV_SCOPE_GLOBAL: 7670 np = &vstate->dtvs_nglobals; 7671 svarp = vstate->dtvs_globals; 7672 break; 7673 7674 default: 7675 ASSERT(0); 7676 } 7677 7678 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7679 continue; 7680 7681 id -= DIF_VAR_OTHER_UBASE; 7682 ASSERT(id < *np); 7683 7684 svar = svarp[id]; 7685 ASSERT(svar != NULL); 7686 ASSERT(svar->dtsv_refcnt > 0); 7687 7688 if (--svar->dtsv_refcnt > 0) 7689 continue; 7690 7691 if (svar->dtsv_size != 0) { 7692 ASSERT(svar->dtsv_data != NULL); 7693 kmem_free((void *)(uintptr_t)svar->dtsv_data, 7694 svar->dtsv_size); 7695 } 7696 7697 kmem_free(svar, sizeof (dtrace_statvar_t)); 7698 svarp[id] = NULL; 7699 } 7700 7701 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 7702 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 7703 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 7704 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 7705 7706 
kmem_free(dp, sizeof (dtrace_difo_t)); 7707 } 7708 7709 static void 7710 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7711 { 7712 int i; 7713 7714 ASSERT(MUTEX_HELD(&dtrace_lock)); 7715 ASSERT(dp->dtdo_refcnt != 0); 7716 7717 for (i = 0; i < dp->dtdo_varlen; i++) { 7718 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7719 7720 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7721 continue; 7722 7723 ASSERT(dtrace_vtime_references > 0); 7724 if (--dtrace_vtime_references == 0) 7725 dtrace_vtime_disable(); 7726 } 7727 7728 if (--dp->dtdo_refcnt == 0) 7729 dtrace_difo_destroy(dp, vstate); 7730 } 7731 7732 /* 7733 * DTrace Format Functions 7734 */ 7735 static uint16_t 7736 dtrace_format_add(dtrace_state_t *state, char *str) 7737 { 7738 char *fmt, **new; 7739 uint16_t ndx, len = strlen(str) + 1; 7740 7741 fmt = kmem_zalloc(len, KM_SLEEP); 7742 bcopy(str, fmt, len); 7743 7744 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 7745 if (state->dts_formats[ndx] == NULL) { 7746 state->dts_formats[ndx] = fmt; 7747 return (ndx + 1); 7748 } 7749 } 7750 7751 if (state->dts_nformats == USHRT_MAX) { 7752 /* 7753 * This is only likely if a denial-of-service attack is being 7754 * attempted. As such, it's okay to fail silently here. 7755 */ 7756 kmem_free(fmt, len); 7757 return (0); 7758 } 7759 7760 /* 7761 * For simplicity, we always resize the formats array to be exactly the 7762 * number of formats. 7763 */ 7764 ndx = state->dts_nformats++; 7765 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 7766 7767 if (state->dts_formats != NULL) { 7768 ASSERT(ndx != 0); 7769 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 7770 kmem_free(state->dts_formats, ndx * sizeof (char *)); 7771 } 7772 7773 state->dts_formats = new; 7774 state->dts_formats[ndx] = fmt; 7775 7776 return (ndx + 1); 7777 } 7778 7779 static void 7780 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 7781 { 7782 char *fmt; 7783 7784 ASSERT(state->dts_formats != NULL); 7785 ASSERT(format <= state->dts_nformats); 7786 ASSERT(state->dts_formats[format - 1] != NULL); 7787 7788 fmt = state->dts_formats[format - 1]; 7789 kmem_free(fmt, strlen(fmt) + 1); 7790 state->dts_formats[format - 1] = NULL; 7791 } 7792 7793 static void 7794 dtrace_format_destroy(dtrace_state_t *state) 7795 { 7796 int i; 7797 7798 if (state->dts_nformats == 0) { 7799 ASSERT(state->dts_formats == NULL); 7800 return; 7801 } 7802 7803 ASSERT(state->dts_formats != NULL); 7804 7805 for (i = 0; i < state->dts_nformats; i++) { 7806 char *fmt = state->dts_formats[i]; 7807 7808 if (fmt == NULL) 7809 continue; 7810 7811 kmem_free(fmt, strlen(fmt) + 1); 7812 } 7813 7814 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 7815 state->dts_nformats = 0; 7816 state->dts_formats = NULL; 7817 } 7818 7819 /* 7820 * DTrace Predicate Functions 7821 */ 7822 static dtrace_predicate_t * 7823 dtrace_predicate_create(dtrace_difo_t *dp) 7824 { 7825 dtrace_predicate_t *pred; 7826 7827 ASSERT(MUTEX_HELD(&dtrace_lock)); 7828 ASSERT(dp->dtdo_refcnt != 0); 7829 7830 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 7831 pred->dtp_difo = dp; 7832 pred->dtp_refcnt = 1; 7833 7834 if (!dtrace_difo_cacheable(dp)) 7835 return (pred); 7836 7837 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 7838 /* 7839 * This is only theoretically possible -- we have had 2^32 7840 * cacheable predicates on this machine. We cannot allow any 7841 * more predicates to become cacheable: as unlikely as it is, 7842 * there may be a thread caching a (now stale) predicate cache 7843 * ID. 
(N.B.: the temptation is being successfully resisted to 7844 * have this cmn_err() "Holy shit -- we executed this code!") 7845 */ 7846 return (pred); 7847 } 7848 7849 pred->dtp_cacheid = dtrace_predcache_id++; 7850 7851 return (pred); 7852 } 7853 7854 static void 7855 dtrace_predicate_hold(dtrace_predicate_t *pred) 7856 { 7857 ASSERT(MUTEX_HELD(&dtrace_lock)); 7858 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 7859 ASSERT(pred->dtp_refcnt > 0); 7860 7861 pred->dtp_refcnt++; 7862 } 7863 7864 static void 7865 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 7866 { 7867 dtrace_difo_t *dp = pred->dtp_difo; 7868 7869 ASSERT(MUTEX_HELD(&dtrace_lock)); 7870 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 7871 ASSERT(pred->dtp_refcnt > 0); 7872 7873 if (--pred->dtp_refcnt == 0) { 7874 dtrace_difo_release(pred->dtp_difo, vstate); 7875 kmem_free(pred, sizeof (dtrace_predicate_t)); 7876 } 7877 } 7878 7879 /* 7880 * DTrace Action Description Functions 7881 */ 7882 static dtrace_actdesc_t * 7883 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 7884 uint64_t uarg, uint64_t arg) 7885 { 7886 dtrace_actdesc_t *act; 7887 7888 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 7889 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 7890 7891 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 7892 act->dtad_kind = kind; 7893 act->dtad_ntuple = ntuple; 7894 act->dtad_uarg = uarg; 7895 act->dtad_arg = arg; 7896 act->dtad_refcnt = 1; 7897 7898 return (act); 7899 } 7900 7901 static void 7902 dtrace_actdesc_hold(dtrace_actdesc_t *act) 7903 { 7904 ASSERT(act->dtad_refcnt >= 1); 7905 act->dtad_refcnt++; 7906 } 7907 7908 static void 7909 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 7910 { 7911 dtrace_actkind_t kind = act->dtad_kind; 7912 dtrace_difo_t *dp; 7913 7914 ASSERT(act->dtad_refcnt >= 1); 7915 7916 if (--act->dtad_refcnt != 0) 7917 return; 7918 7919 if ((dp = act->dtad_difo) != NULL) 7920 dtrace_difo_release(dp, vstate); 7921 7922 if (DTRACEACT_ISPRINTFLIKE(kind)) { 7923 char *str = (char *)(uintptr_t)act->dtad_arg; 7924 7925 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 7926 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 7927 7928 if (str != NULL) 7929 kmem_free(str, strlen(str) + 1); 7930 } 7931 7932 kmem_free(act, sizeof (dtrace_actdesc_t)); 7933 } 7934 7935 /* 7936 * DTrace ECB Functions 7937 */ 7938 static dtrace_ecb_t * 7939 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 7940 { 7941 dtrace_ecb_t *ecb; 7942 dtrace_epid_t epid; 7943 7944 ASSERT(MUTEX_HELD(&dtrace_lock)); 7945 7946 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 7947 ecb->dte_predicate = NULL; 7948 ecb->dte_probe = probe; 7949 7950 /* 7951 * The default size is the size of the default action: recording 7952 * the epid. 
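 * Every record in the principal buffer therefore begins with its
 * EPID; conceptually the buffer is laid out as:
 *
 *	[ epid | action data ... ][ epid | action data ... ] ...
 *
 * which is what allows a consumer to map raw buffer contents back to
 * the enabling that produced them.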
7953 */ 7954 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 7955 ecb->dte_alignment = sizeof (dtrace_epid_t); 7956 7957 epid = state->dts_epid++; 7958 7959 if (epid - 1 >= state->dts_necbs) { 7960 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 7961 int necbs = state->dts_necbs << 1; 7962 7963 ASSERT(epid == state->dts_necbs + 1); 7964 7965 if (necbs == 0) { 7966 ASSERT(oecbs == NULL); 7967 necbs = 1; 7968 } 7969 7970 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 7971 7972 if (oecbs != NULL) 7973 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 7974 7975 dtrace_membar_producer(); 7976 state->dts_ecbs = ecbs; 7977 7978 if (oecbs != NULL) { 7979 /* 7980 * If this state is active, we must dtrace_sync() 7981 * before we can free the old dts_ecbs array: we're 7982 * coming in hot, and there may be active ring 7983 * buffer processing (which indexes into the dts_ecbs 7984 * array) on another CPU. 7985 */ 7986 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 7987 dtrace_sync(); 7988 7989 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 7990 } 7991 7992 dtrace_membar_producer(); 7993 state->dts_necbs = necbs; 7994 } 7995 7996 ecb->dte_state = state; 7997 7998 ASSERT(state->dts_ecbs[epid - 1] == NULL); 7999 dtrace_membar_producer(); 8000 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8001 8002 return (ecb); 8003 } 8004 8005 static void 8006 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8007 { 8008 dtrace_probe_t *probe = ecb->dte_probe; 8009 8010 ASSERT(MUTEX_HELD(&cpu_lock)); 8011 ASSERT(MUTEX_HELD(&dtrace_lock)); 8012 ASSERT(ecb->dte_next == NULL); 8013 8014 if (probe == NULL) { 8015 /* 8016 * This is the NULL probe -- there's nothing to do. 8017 */ 8018 return; 8019 } 8020 8021 if (probe->dtpr_ecb == NULL) { 8022 dtrace_provider_t *prov = probe->dtpr_provider; 8023 8024 /* 8025 * We're the first ECB on this probe. 8026 */ 8027 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8028 8029 if (ecb->dte_predicate != NULL) 8030 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8031 8032 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8033 probe->dtpr_id, probe->dtpr_arg); 8034 } else { 8035 /* 8036 * This probe is already active. Swing the last pointer to 8037 * point to the new ECB, and issue a dtrace_sync() to assure 8038 * that all CPUs have seen the change. 8039 */ 8040 ASSERT(probe->dtpr_ecb_last != NULL); 8041 probe->dtpr_ecb_last->dte_next = ecb; 8042 probe->dtpr_ecb_last = ecb; 8043 probe->dtpr_predcache = 0; 8044 8045 dtrace_sync(); 8046 } 8047 } 8048 8049 static void 8050 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8051 { 8052 uint32_t maxalign = sizeof (dtrace_epid_t); 8053 uint32_t align = sizeof (uint8_t), offs, diff; 8054 dtrace_action_t *act; 8055 int wastuple = 0; 8056 uint32_t aggbase = UINT32_MAX; 8057 dtrace_state_t *state = ecb->dte_state; 8058 8059 /* 8060 * If we record anything, we always record the epid. (And we always 8061 * record it first.) 8062 */ 8063 offs = sizeof (dtrace_epid_t); 8064 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8065 8066 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8067 dtrace_recdesc_t *rec = &act->dta_rec; 8068 8069 if ((align = rec->dtrd_alignment) > maxalign) 8070 maxalign = align; 8071 8072 if (!wastuple && act->dta_intuple) { 8073 /* 8074 * This is the first record in a tuple. Align the 8075 * offset to be at offset 4 in an 8-byte aligned 8076 * block. 
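 * For example, if offs is 8 on entry, diff becomes (8 + 4) & 7 = 4,
 * so offs advances to 12 and aggbase becomes 8: the 4-byte
 * aggregation id occupies the 8-byte-aligned word at aggbase, and
 * the tuple data begins at offset 4 within that block.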
8077 */ 8078 diff = offs + sizeof (dtrace_aggid_t); 8079 8080 if (diff = (diff & (sizeof (uint64_t) - 1))) 8081 offs += sizeof (uint64_t) - diff; 8082 8083 aggbase = offs - sizeof (dtrace_aggid_t); 8084 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 8085 } 8086 8087 /*LINTED*/ 8088 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 8089 /* 8090 * The current offset is not properly aligned; align it. 8091 */ 8092 offs += align - diff; 8093 } 8094 8095 rec->dtrd_offset = offs; 8096 8097 if (offs + rec->dtrd_size > ecb->dte_needed) { 8098 ecb->dte_needed = offs + rec->dtrd_size; 8099 8100 if (ecb->dte_needed > state->dts_needed) 8101 state->dts_needed = ecb->dte_needed; 8102 } 8103 8104 if (DTRACEACT_ISAGG(act->dta_kind)) { 8105 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8106 dtrace_action_t *first = agg->dtag_first, *prev; 8107 8108 ASSERT(rec->dtrd_size != 0 && first != NULL); 8109 ASSERT(wastuple); 8110 ASSERT(aggbase != UINT32_MAX); 8111 8112 agg->dtag_base = aggbase; 8113 8114 while ((prev = first->dta_prev) != NULL && 8115 DTRACEACT_ISAGG(prev->dta_kind)) { 8116 agg = (dtrace_aggregation_t *)prev; 8117 first = agg->dtag_first; 8118 } 8119 8120 if (prev != NULL) { 8121 offs = prev->dta_rec.dtrd_offset + 8122 prev->dta_rec.dtrd_size; 8123 } else { 8124 offs = sizeof (dtrace_epid_t); 8125 } 8126 wastuple = 0; 8127 } else { 8128 if (!act->dta_intuple) 8129 ecb->dte_size = offs + rec->dtrd_size; 8130 8131 offs += rec->dtrd_size; 8132 } 8133 8134 wastuple = act->dta_intuple; 8135 } 8136 8137 if ((act = ecb->dte_action) != NULL && 8138 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8139 ecb->dte_size == sizeof (dtrace_epid_t)) { 8140 /* 8141 * If the size is still sizeof (dtrace_epid_t), then all 8142 * actions store no data; set the size to 0. 8143 */ 8144 ecb->dte_alignment = maxalign; 8145 ecb->dte_size = 0; 8146 8147 /* 8148 * If the needed space is still sizeof (dtrace_epid_t), then 8149 * all actions need no additional space; set the needed 8150 * size to 0. 8151 */ 8152 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8153 ecb->dte_needed = 0; 8154 8155 return; 8156 } 8157 8158 /* 8159 * Set our alignment, and make sure that the dte_size and dte_needed 8160 * are aligned to the size of an EPID. 
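 * The round-up below is the usual power-of-two trick; with a 4-byte
 * EPID, for example, a 13-byte record size becomes 16.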
8161 */ 8162 ecb->dte_alignment = maxalign; 8163 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8164 ~(sizeof (dtrace_epid_t) - 1); 8165 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8166 ~(sizeof (dtrace_epid_t) - 1); 8167 ASSERT(ecb->dte_size <= ecb->dte_needed); 8168 } 8169 8170 static dtrace_action_t * 8171 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8172 { 8173 dtrace_aggregation_t *agg; 8174 size_t size = sizeof (uint64_t); 8175 int ntuple = desc->dtad_ntuple; 8176 dtrace_action_t *act; 8177 dtrace_recdesc_t *frec; 8178 dtrace_aggid_t aggid; 8179 dtrace_state_t *state = ecb->dte_state; 8180 8181 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8182 agg->dtag_ecb = ecb; 8183 8184 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8185 8186 switch (desc->dtad_kind) { 8187 case DTRACEAGG_MIN: 8188 agg->dtag_initial = UINT64_MAX; 8189 agg->dtag_aggregate = dtrace_aggregate_min; 8190 break; 8191 8192 case DTRACEAGG_MAX: 8193 agg->dtag_aggregate = dtrace_aggregate_max; 8194 break; 8195 8196 case DTRACEAGG_COUNT: 8197 agg->dtag_aggregate = dtrace_aggregate_count; 8198 break; 8199 8200 case DTRACEAGG_QUANTIZE: 8201 agg->dtag_aggregate = dtrace_aggregate_quantize; 8202 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8203 sizeof (uint64_t); 8204 break; 8205 8206 case DTRACEAGG_LQUANTIZE: { 8207 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8208 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8209 8210 agg->dtag_initial = desc->dtad_arg; 8211 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8212 8213 if (step == 0 || levels == 0) 8214 goto err; 8215 8216 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8217 break; 8218 } 8219 8220 case DTRACEAGG_AVG: 8221 agg->dtag_aggregate = dtrace_aggregate_avg; 8222 size = sizeof (uint64_t) * 2; 8223 break; 8224 8225 case DTRACEAGG_SUM: 8226 agg->dtag_aggregate = dtrace_aggregate_sum; 8227 break; 8228 8229 default: 8230 goto err; 8231 } 8232 8233 agg->dtag_action.dta_rec.dtrd_size = size; 8234 8235 if (ntuple == 0) 8236 goto err; 8237 8238 /* 8239 * We must make sure that we have enough actions for the n-tuple. 8240 */ 8241 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8242 if (DTRACEACT_ISAGG(act->dta_kind)) 8243 break; 8244 8245 if (--ntuple == 0) { 8246 /* 8247 * This is the action with which our n-tuple begins. 8248 */ 8249 agg->dtag_first = act; 8250 goto success; 8251 } 8252 } 8253 8254 /* 8255 * This n-tuple is short by ntuple elements. Return failure. 8256 */ 8257 ASSERT(ntuple != 0); 8258 err: 8259 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8260 return (NULL); 8261 8262 success: 8263 /* 8264 * If the last action in the tuple has a size of zero, it's actually 8265 * an expression argument for the aggregating action. 8266 */ 8267 ASSERT(ecb->dte_action_last != NULL); 8268 act = ecb->dte_action_last; 8269 8270 if (act->dta_kind == DTRACEACT_DIFEXPR) { 8271 ASSERT(act->dta_difo != NULL); 8272 8273 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 8274 agg->dtag_hasarg = 1; 8275 } 8276 8277 /* 8278 * We need to allocate an id for this aggregation. 
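 * Aggregation ids form a namespace parallel to EPIDs: they are drawn
 * from the per-state dts_aggid_arena, and the dts_aggregations array
 * below grows by doubling, just as the dtrace_probes and dts_ecbs
 * arrays do.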
8279 */ 8280 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8281 VM_BESTFIT | VM_SLEEP); 8282 8283 if (aggid - 1 >= state->dts_naggregations) { 8284 dtrace_aggregation_t **oaggs = state->dts_aggregations; 8285 dtrace_aggregation_t **aggs; 8286 int naggs = state->dts_naggregations << 1; 8287 int onaggs = state->dts_naggregations; 8288 8289 ASSERT(aggid == state->dts_naggregations + 1); 8290 8291 if (naggs == 0) { 8292 ASSERT(oaggs == NULL); 8293 naggs = 1; 8294 } 8295 8296 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 8297 8298 if (oaggs != NULL) { 8299 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 8300 kmem_free(oaggs, onaggs * sizeof (*aggs)); 8301 } 8302 8303 state->dts_aggregations = aggs; 8304 state->dts_naggregations = naggs; 8305 } 8306 8307 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 8308 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 8309 8310 frec = &agg->dtag_first->dta_rec; 8311 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 8312 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 8313 8314 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 8315 ASSERT(!act->dta_intuple); 8316 act->dta_intuple = 1; 8317 } 8318 8319 return (&agg->dtag_action); 8320 } 8321 8322 static void 8323 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 8324 { 8325 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8326 dtrace_state_t *state = ecb->dte_state; 8327 dtrace_aggid_t aggid = agg->dtag_id; 8328 8329 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 8330 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 8331 8332 ASSERT(state->dts_aggregations[aggid - 1] == agg); 8333 state->dts_aggregations[aggid - 1] = NULL; 8334 8335 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8336 } 8337 8338 static int 8339 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8340 { 8341 dtrace_action_t *action, *last; 8342 dtrace_difo_t *dp = desc->dtad_difo; 8343 uint32_t size = 0, align = sizeof (uint8_t), mask; 8344 uint16_t format = 0; 8345 dtrace_recdesc_t *rec; 8346 dtrace_state_t *state = ecb->dte_state; 8347 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 8348 uint64_t arg = desc->dtad_arg; 8349 8350 ASSERT(MUTEX_HELD(&dtrace_lock)); 8351 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 8352 8353 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 8354 /* 8355 * If this is an aggregating action, there must be neither 8356 * a speculate nor a commit on the action chain. 8357 */ 8358 dtrace_action_t *act; 8359 8360 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8361 if (act->dta_kind == DTRACEACT_COMMIT) 8362 return (EINVAL); 8363 8364 if (act->dta_kind == DTRACEACT_SPECULATE) 8365 return (EINVAL); 8366 } 8367 8368 action = dtrace_ecb_aggregation_create(ecb, desc); 8369 8370 if (action == NULL) 8371 return (EINVAL); 8372 } else { 8373 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 8374 (desc->dtad_kind == DTRACEACT_DIFEXPR && 8375 dp != NULL && dp->dtdo_destructive)) { 8376 state->dts_destructive = 1; 8377 } 8378 8379 switch (desc->dtad_kind) { 8380 case DTRACEACT_PRINTF: 8381 case DTRACEACT_PRINTA: 8382 case DTRACEACT_SYSTEM: 8383 case DTRACEACT_FREOPEN: 8384 /* 8385 * We know that our arg is a string -- turn it into a 8386 * format. 
8387 */ 8388 if (arg == NULL) { 8389 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 8390 format = 0; 8391 } else { 8392 ASSERT(arg != NULL); 8393 ASSERT(arg > KERNELBASE); 8394 format = dtrace_format_add(state, 8395 (char *)(uintptr_t)arg); 8396 } 8397 8398 /*FALLTHROUGH*/ 8399 case DTRACEACT_LIBACT: 8400 case DTRACEACT_DIFEXPR: 8401 if (dp == NULL) 8402 return (EINVAL); 8403 8404 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 8405 break; 8406 8407 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 8408 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8409 return (EINVAL); 8410 8411 size = opt[DTRACEOPT_STRSIZE]; 8412 } 8413 8414 break; 8415 8416 case DTRACEACT_STACK: 8417 if ((nframes = arg) == 0) { 8418 nframes = opt[DTRACEOPT_STACKFRAMES]; 8419 ASSERT(nframes > 0); 8420 arg = nframes; 8421 } 8422 8423 size = nframes * sizeof (pc_t); 8424 break; 8425 8426 case DTRACEACT_JSTACK: 8427 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 8428 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 8429 8430 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 8431 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 8432 8433 arg = DTRACE_USTACK_ARG(nframes, strsize); 8434 8435 /*FALLTHROUGH*/ 8436 case DTRACEACT_USTACK: 8437 if (desc->dtad_kind != DTRACEACT_JSTACK && 8438 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 8439 strsize = DTRACE_USTACK_STRSIZE(arg); 8440 nframes = opt[DTRACEOPT_USTACKFRAMES]; 8441 ASSERT(nframes > 0); 8442 arg = DTRACE_USTACK_ARG(nframes, strsize); 8443 } 8444 8445 /* 8446 * Save a slot for the pid. 8447 */ 8448 size = (nframes + 1) * sizeof (uint64_t); 8449 size += DTRACE_USTACK_STRSIZE(arg); 8450 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 8451 8452 break; 8453 8454 case DTRACEACT_SYM: 8455 case DTRACEACT_MOD: 8456 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 8457 sizeof (uint64_t)) || 8458 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8459 return (EINVAL); 8460 break; 8461 8462 case DTRACEACT_USYM: 8463 case DTRACEACT_UMOD: 8464 case DTRACEACT_UADDR: 8465 if (dp == NULL || 8466 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 8467 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8468 return (EINVAL); 8469 8470 /* 8471 * We have a slot for the pid, plus a slot for the 8472 * argument. To keep things simple (aligned with 8473 * bitness-neutral sizing), we store each as a 64-bit 8474 * quantity. 
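 *
 * The resulting record is two contiguous 64-bit words, so a consumer can
 * decode it with a fixed shape regardless of the traced process's data
 * model.  A sketch of a consumer-side view (the structure name is
 * hypothetical, not part of any interface):
 *
 *	typedef struct usym_rec {
 *		uint64_t usr_pid;	(pid of the traced process)
 *		uint64_t usr_addr;	(user address to be resolved)
 *	} usym_rec_t;
 *
 * This is exactly 2 * sizeof (uint64_t), matching the size set below.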
8475 */ 8476 size = 2 * sizeof (uint64_t); 8477 break; 8478 8479 case DTRACEACT_STOP: 8480 case DTRACEACT_BREAKPOINT: 8481 case DTRACEACT_PANIC: 8482 break; 8483 8484 case DTRACEACT_CHILL: 8485 case DTRACEACT_DISCARD: 8486 case DTRACEACT_RAISE: 8487 if (dp == NULL) 8488 return (EINVAL); 8489 break; 8490 8491 case DTRACEACT_EXIT: 8492 if (dp == NULL || 8493 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 8494 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8495 return (EINVAL); 8496 break; 8497 8498 case DTRACEACT_SPECULATE: 8499 if (ecb->dte_size > sizeof (dtrace_epid_t)) 8500 return (EINVAL); 8501 8502 if (dp == NULL) 8503 return (EINVAL); 8504 8505 state->dts_speculates = 1; 8506 break; 8507 8508 case DTRACEACT_COMMIT: { 8509 dtrace_action_t *act = ecb->dte_action; 8510 8511 for (; act != NULL; act = act->dta_next) { 8512 if (act->dta_kind == DTRACEACT_COMMIT) 8513 return (EINVAL); 8514 } 8515 8516 if (dp == NULL) 8517 return (EINVAL); 8518 break; 8519 } 8520 8521 default: 8522 return (EINVAL); 8523 } 8524 8525 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 8526 /* 8527 * If this is a data-storing action or a speculate, 8528 * we must be sure that there isn't a commit on the 8529 * action chain. 8530 */ 8531 dtrace_action_t *act = ecb->dte_action; 8532 8533 for (; act != NULL; act = act->dta_next) { 8534 if (act->dta_kind == DTRACEACT_COMMIT) 8535 return (EINVAL); 8536 } 8537 } 8538 8539 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 8540 action->dta_rec.dtrd_size = size; 8541 } 8542 8543 action->dta_refcnt = 1; 8544 rec = &action->dta_rec; 8545 size = rec->dtrd_size; 8546 8547 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 8548 if (!(size & mask)) { 8549 align = mask + 1; 8550 break; 8551 } 8552 } 8553 8554 action->dta_kind = desc->dtad_kind; 8555 8556 if ((action->dta_difo = dp) != NULL) 8557 dtrace_difo_hold(dp); 8558 8559 rec->dtrd_action = action->dta_kind; 8560 rec->dtrd_arg = arg; 8561 8562 if (ecb->dte_state == dtrace_anon.dta_state) { 8563 /* 8564 * If this is an anonymous enabling, explicitly clear the uarg. 
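 *
 * (As an aside on the mask loop above:  it infers a record's natural
 * alignment from the low-order bits of its size.  A worked example,
 * assuming a hypothetical 12-byte record:
 *
 *	size = 12 (binary 1100)
 *	mask = 7:  size & 7 == 4, so not 8-byte alignable
 *	mask = 3:  size & 3 == 0, so align = mask + 1 = 4
 *
 * That is, the record is aligned to the largest power of two, up to
 * sizeof (uint64_t), that evenly divides its size.)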
8565 */ 8566 rec->dtrd_uarg = 0; 8567 } else { 8568 rec->dtrd_uarg = desc->dtad_uarg; 8569 } 8570 8571 rec->dtrd_alignment = (uint16_t)align; 8572 rec->dtrd_format = format; 8573 8574 if ((last = ecb->dte_action_last) != NULL) { 8575 ASSERT(ecb->dte_action != NULL); 8576 action->dta_prev = last; 8577 last->dta_next = action; 8578 } else { 8579 ASSERT(ecb->dte_action == NULL); 8580 ecb->dte_action = action; 8581 } 8582 8583 ecb->dte_action_last = action; 8584 8585 return (0); 8586 } 8587 8588 static void 8589 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 8590 { 8591 dtrace_action_t *act = ecb->dte_action, *next; 8592 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 8593 dtrace_difo_t *dp; 8594 uint16_t format; 8595 8596 if (act != NULL && act->dta_refcnt > 1) { 8597 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 8598 act->dta_refcnt--; 8599 } else { 8600 for (; act != NULL; act = next) { 8601 next = act->dta_next; 8602 ASSERT(next != NULL || act == ecb->dte_action_last); 8603 ASSERT(act->dta_refcnt == 1); 8604 8605 if ((format = act->dta_rec.dtrd_format) != 0) 8606 dtrace_format_remove(ecb->dte_state, format); 8607 8608 if ((dp = act->dta_difo) != NULL) 8609 dtrace_difo_release(dp, vstate); 8610 8611 if (DTRACEACT_ISAGG(act->dta_kind)) { 8612 dtrace_ecb_aggregation_destroy(ecb, act); 8613 } else { 8614 kmem_free(act, sizeof (dtrace_action_t)); 8615 } 8616 } 8617 } 8618 8619 ecb->dte_action = NULL; 8620 ecb->dte_action_last = NULL; 8621 ecb->dte_size = sizeof (dtrace_epid_t); 8622 } 8623 8624 static void 8625 dtrace_ecb_disable(dtrace_ecb_t *ecb) 8626 { 8627 /* 8628 * We disable the ECB by removing it from its probe. 8629 */ 8630 dtrace_ecb_t *pecb, *prev = NULL; 8631 dtrace_probe_t *probe = ecb->dte_probe; 8632 8633 ASSERT(MUTEX_HELD(&dtrace_lock)); 8634 8635 if (probe == NULL) { 8636 /* 8637 * This is the NULL probe; there is nothing to disable. 8638 */ 8639 return; 8640 } 8641 8642 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 8643 if (pecb == ecb) 8644 break; 8645 prev = pecb; 8646 } 8647 8648 ASSERT(pecb != NULL); 8649 8650 if (prev == NULL) { 8651 probe->dtpr_ecb = ecb->dte_next; 8652 } else { 8653 prev->dte_next = ecb->dte_next; 8654 } 8655 8656 if (ecb == probe->dtpr_ecb_last) { 8657 ASSERT(ecb->dte_next == NULL); 8658 probe->dtpr_ecb_last = prev; 8659 } 8660 8661 /* 8662 * The ECB has been disconnected from the probe; now sync to assure 8663 * that all CPUs have seen the change before returning. 8664 */ 8665 dtrace_sync(); 8666 8667 if (probe->dtpr_ecb == NULL) { 8668 /* 8669 * That was the last ECB on the probe; clear the predicate 8670 * cache ID for the probe, disable it and sync one more time 8671 * to assure that we'll never hit it again. 8672 */ 8673 dtrace_provider_t *prov = probe->dtpr_provider; 8674 8675 ASSERT(ecb->dte_next == NULL); 8676 ASSERT(probe->dtpr_ecb_last == NULL); 8677 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 8678 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 8679 probe->dtpr_id, probe->dtpr_arg); 8680 dtrace_sync(); 8681 } else { 8682 /* 8683 * There is at least one ECB remaining on the probe. If there 8684 * is _exactly_ one, set the probe's predicate cache ID to be 8685 * the predicate cache ID of the remaining ECB. 
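 *
 * The predicate cache ID allows dtrace_probe() to skip predicate
 * evaluation entirely when the sole ECB's predicate has already
 * evaluated to false for the firing thread.  A sketch of the shape of
 * that check (a paraphrase, not a verbatim copy of dtrace_probe()):
 *
 *	if (probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
 *	    probe->dtpr_predcache == curthread->t_predcache)
 *		return;	(predicate known false for this thread)
 *
 * This is also why the cache must be invalidated -- set to
 * DTRACE_CACHEIDNONE -- whenever more than one ECB is on the probe.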
8686 */ 8687 ASSERT(probe->dtpr_ecb_last != NULL); 8688 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 8689 8690 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 8691 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 8692 8693 ASSERT(probe->dtpr_ecb->dte_next == NULL); 8694 8695 if (p != NULL) 8696 probe->dtpr_predcache = p->dtp_cacheid; 8697 } 8698 8699 ecb->dte_next = NULL; 8700 } 8701 } 8702 8703 static void 8704 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 8705 { 8706 dtrace_state_t *state = ecb->dte_state; 8707 dtrace_vstate_t *vstate = &state->dts_vstate; 8708 dtrace_predicate_t *pred; 8709 dtrace_epid_t epid = ecb->dte_epid; 8710 8711 ASSERT(MUTEX_HELD(&dtrace_lock)); 8712 ASSERT(ecb->dte_next == NULL); 8713 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 8714 8715 if ((pred = ecb->dte_predicate) != NULL) 8716 dtrace_predicate_release(pred, vstate); 8717 8718 dtrace_ecb_action_remove(ecb); 8719 8720 ASSERT(state->dts_ecbs[epid - 1] == ecb); 8721 state->dts_ecbs[epid - 1] = NULL; 8722 8723 kmem_free(ecb, sizeof (dtrace_ecb_t)); 8724 } 8725 8726 static dtrace_ecb_t * 8727 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 8728 dtrace_enabling_t *enab) 8729 { 8730 dtrace_ecb_t *ecb; 8731 dtrace_predicate_t *pred; 8732 dtrace_actdesc_t *act; 8733 dtrace_provider_t *prov; 8734 dtrace_ecbdesc_t *desc = enab->dten_current; 8735 8736 ASSERT(MUTEX_HELD(&dtrace_lock)); 8737 ASSERT(state != NULL); 8738 8739 ecb = dtrace_ecb_add(state, probe); 8740 ecb->dte_uarg = desc->dted_uarg; 8741 8742 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 8743 dtrace_predicate_hold(pred); 8744 ecb->dte_predicate = pred; 8745 } 8746 8747 if (probe != NULL) { 8748 /* 8749 * If the provider shows more leg than the consumer is old 8750 * enough to see, we need to enable the appropriate implicit 8751 * predicate bits to prevent the ecb from activating at 8752 * revealing times. 8753 */ 8754 prov = probe->dtpr_provider; 8755 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 8756 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 8757 ecb->dte_cond |= DTRACE_COND_OWNER; 8758 8759 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 8760 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 8761 ecb->dte_cond |= DTRACE_COND_USERMODE; 8762 } 8763 8764 if (dtrace_ecb_create_cache != NULL) { 8765 /* 8766 * If we have a cached ecb, we'll use its action list instead 8767 * of creating our own (saving both time and space). 
8768 */ 8769 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 8770 dtrace_action_t *act = cached->dte_action; 8771 8772 if (act != NULL) { 8773 ASSERT(act->dta_refcnt > 0); 8774 act->dta_refcnt++; 8775 ecb->dte_action = act; 8776 ecb->dte_action_last = cached->dte_action_last; 8777 ecb->dte_needed = cached->dte_needed; 8778 ecb->dte_size = cached->dte_size; 8779 ecb->dte_alignment = cached->dte_alignment; 8780 } 8781 8782 return (ecb); 8783 } 8784 8785 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 8786 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 8787 dtrace_ecb_destroy(ecb); 8788 return (NULL); 8789 } 8790 } 8791 8792 dtrace_ecb_resize(ecb); 8793 8794 return (dtrace_ecb_create_cache = ecb); 8795 } 8796 8797 static int 8798 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 8799 { 8800 dtrace_ecb_t *ecb; 8801 dtrace_enabling_t *enab = arg; 8802 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 8803 8804 ASSERT(state != NULL); 8805 8806 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 8807 /* 8808 * This probe was created in a generation for which this 8809 * enabling has previously created ECBs; we don't want to 8810 * enable it again, so just kick out. 8811 */ 8812 return (DTRACE_MATCH_NEXT); 8813 } 8814 8815 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 8816 return (DTRACE_MATCH_DONE); 8817 8818 dtrace_ecb_enable(ecb); 8819 return (DTRACE_MATCH_NEXT); 8820 } 8821 8822 static dtrace_ecb_t * 8823 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 8824 { 8825 dtrace_ecb_t *ecb; 8826 8827 ASSERT(MUTEX_HELD(&dtrace_lock)); 8828 8829 if (id == 0 || id > state->dts_necbs) 8830 return (NULL); 8831 8832 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 8833 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 8834 8835 return (state->dts_ecbs[id - 1]); 8836 } 8837 8838 static dtrace_aggregation_t * 8839 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 8840 { 8841 dtrace_aggregation_t *agg; 8842 8843 ASSERT(MUTEX_HELD(&dtrace_lock)); 8844 8845 if (id == 0 || id > state->dts_naggregations) 8846 return (NULL); 8847 8848 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 8849 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 8850 agg->dtag_id == id); 8851 8852 return (state->dts_aggregations[id - 1]); 8853 } 8854 8855 /* 8856 * DTrace Buffer Functions 8857 * 8858 * The following functions manipulate DTrace buffers. Most of these functions 8859 * are called in the context of establishing or processing consumer state; 8860 * exceptions are explicitly noted. 8861 */ 8862 8863 /* 8864 * Note: called from cross call context. This function switches the two 8865 * buffers on a given CPU. The atomicity of this operation is assured by 8866 * disabling interrupts while the actual switch takes place; the disabling of 8867 * interrupts serializes the execution with any execution of dtrace_probe() on 8868 * the same CPU. 
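 *
 * Because the switch must happen on the CPU that owns the buffers, it is
 * dispatched with a cross call rather than called directly.  A sketch of
 * the dispatch idiom (the surrounding consumer plumbing is elided):
 *
 *	buf = &state->dts_buffer[cpu];
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);
 *
 * Interrupt disabling then suffices for mutual exclusion because
 * dtrace_probe() likewise runs with interrupts disabled, so the two can
 * never interleave on a single CPU.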
8869 */ 8870 static void 8871 dtrace_buffer_switch(dtrace_buffer_t *buf) 8872 { 8873 caddr_t tomax = buf->dtb_tomax; 8874 caddr_t xamot = buf->dtb_xamot; 8875 dtrace_icookie_t cookie; 8876 8877 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 8878 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 8879 8880 cookie = dtrace_interrupt_disable(); 8881 buf->dtb_tomax = xamot; 8882 buf->dtb_xamot = tomax; 8883 buf->dtb_xamot_drops = buf->dtb_drops; 8884 buf->dtb_xamot_offset = buf->dtb_offset; 8885 buf->dtb_xamot_errors = buf->dtb_errors; 8886 buf->dtb_xamot_flags = buf->dtb_flags; 8887 buf->dtb_offset = 0; 8888 buf->dtb_drops = 0; 8889 buf->dtb_errors = 0; 8890 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 8891 dtrace_interrupt_enable(cookie); 8892 } 8893 8894 /* 8895 * Note: called from cross call context. This function activates a buffer 8896 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 8897 * is guaranteed by the disabling of interrupts. 8898 */ 8899 static void 8900 dtrace_buffer_activate(dtrace_state_t *state) 8901 { 8902 dtrace_buffer_t *buf; 8903 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 8904 8905 buf = &state->dts_buffer[CPU->cpu_id]; 8906 8907 if (buf->dtb_tomax != NULL) { 8908 /* 8909 * We might like to assert that the buffer is marked inactive, 8910 * but this isn't necessarily true: the buffer for the CPU 8911 * that processes the BEGIN probe has its buffer activated 8912 * manually. In this case, we take the (harmless) action of 8913 * re-clearing the INACTIVE bit. 8914 */ 8915 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 8916 } 8917 8918 dtrace_interrupt_enable(cookie); 8919 } 8920 8921 static int 8922 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 8923 processorid_t cpu) 8924 { 8925 cpu_t *cp; 8926 dtrace_buffer_t *buf; 8927 8928 ASSERT(MUTEX_HELD(&cpu_lock)); 8929 ASSERT(MUTEX_HELD(&dtrace_lock)); 8930 8931 if (crgetuid(CRED()) != 0 && size > dtrace_nonroot_maxsize) 8932 return (EFBIG); 8933 8934 cp = cpu_list; 8935 8936 do { 8937 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 8938 continue; 8939 8940 buf = &bufs[cp->cpu_id]; 8941 8942 /* 8943 * If there is already a buffer allocated for this CPU, it 8944 * is only possible that this is a DR event. In this case, 8945 * the buffer size must match our specified size. 8946 */ 8947 if (buf->dtb_tomax != NULL) { 8948 ASSERT(buf->dtb_size == size); 8949 continue; 8950 } 8951 8952 ASSERT(buf->dtb_xamot == NULL); 8953 8954 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 8955 goto err; 8956 8957 buf->dtb_size = size; 8958 buf->dtb_flags = flags; 8959 buf->dtb_offset = 0; 8960 buf->dtb_drops = 0; 8961 8962 if (flags & DTRACEBUF_NOSWITCH) 8963 continue; 8964 8965 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 8966 goto err; 8967 } while ((cp = cp->cpu_next) != cpu_list); 8968 8969 return (0); 8970 8971 err: 8972 cp = cpu_list; 8973 8974 do { 8975 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 8976 continue; 8977 8978 buf = &bufs[cp->cpu_id]; 8979 8980 if (buf->dtb_xamot != NULL) { 8981 ASSERT(buf->dtb_tomax != NULL); 8982 ASSERT(buf->dtb_size == size); 8983 kmem_free(buf->dtb_xamot, size); 8984 } 8985 8986 if (buf->dtb_tomax != NULL) { 8987 ASSERT(buf->dtb_size == size); 8988 kmem_free(buf->dtb_tomax, size); 8989 } 8990 8991 buf->dtb_tomax = NULL; 8992 buf->dtb_xamot = NULL; 8993 buf->dtb_size = 0; 8994 } while ((cp = cp->cpu_next) != cpu_list); 8995 8996 return (ENOMEM); 8997 } 8998 8999 /* 9000 * Note: called from probe context.
This function just increments the drop 9001 * count on a buffer. It has been made a function to allow for the 9002 * possibility of understanding the source of mysterious drop counts. (A 9003 * problem for which one may be particularly disappointed that DTrace cannot 9004 * be used to understand DTrace.) 9005 */ 9006 static void 9007 dtrace_buffer_drop(dtrace_buffer_t *buf) 9008 { 9009 buf->dtb_drops++; 9010 } 9011 9012 /* 9013 * Note: called from probe context. This function is called to reserve space 9014 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9015 * mstate. Returns the new offset in the buffer, or a negative value if an 9016 * error has occurred. 9017 */ 9018 static intptr_t 9019 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9020 dtrace_state_t *state, dtrace_mstate_t *mstate) 9021 { 9022 intptr_t offs = buf->dtb_offset, soffs; 9023 intptr_t woffs; 9024 caddr_t tomax; 9025 size_t total; 9026 9027 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9028 return (-1); 9029 9030 if ((tomax = buf->dtb_tomax) == NULL) { 9031 dtrace_buffer_drop(buf); 9032 return (-1); 9033 } 9034 9035 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9036 while (offs & (align - 1)) { 9037 /* 9038 * Assert that our alignment is off by a number which 9039 * is itself sizeof (uint32_t) aligned. 9040 */ 9041 ASSERT(!((align - (offs & (align - 1))) & 9042 (sizeof (uint32_t) - 1))); 9043 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9044 offs += sizeof (uint32_t); 9045 } 9046 9047 if ((soffs = offs + needed) > buf->dtb_size) { 9048 dtrace_buffer_drop(buf); 9049 return (-1); 9050 } 9051 9052 if (mstate == NULL) 9053 return (offs); 9054 9055 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 9056 mstate->dtms_scratch_size = buf->dtb_size - soffs; 9057 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9058 9059 return (offs); 9060 } 9061 9062 if (buf->dtb_flags & DTRACEBUF_FILL) { 9063 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 9064 (buf->dtb_flags & DTRACEBUF_FULL)) 9065 return (-1); 9066 goto out; 9067 } 9068 9069 total = needed + (offs & (align - 1)); 9070 9071 /* 9072 * For a ring buffer, life is quite a bit more complicated. Before 9073 * we can store any padding, we need to adjust our wrapping offset. 9074 * (If we've never before wrapped or we're not about to, no adjustment 9075 * is required.) 9076 */ 9077 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 9078 offs + total > buf->dtb_size) { 9079 woffs = buf->dtb_xamot_offset; 9080 9081 if (offs + total > buf->dtb_size) { 9082 /* 9083 * We can't fit in the end of the buffer. First, a 9084 * sanity check that we can fit in the buffer at all. 9085 */ 9086 if (total > buf->dtb_size) { 9087 dtrace_buffer_drop(buf); 9088 return (-1); 9089 } 9090 9091 /* 9092 * We're going to be storing at the top of the buffer, 9093 * so now we need to deal with the wrapped offset. We 9094 * only reset our wrapped offset to 0 if it is 9095 * currently greater than the current offset. If it 9096 * is less than the current offset, it is because a 9097 * previous allocation induced a wrap -- but the 9098 * allocation didn't subsequently take the space due 9099 * to an error or false predicate evaluation. In this 9100 * case, we'll just leave the wrapped offset alone: if 9101 * the wrapped offset hasn't been advanced far enough 9102 * for this allocation, it will be adjusted in the 9103 * lower loop. 
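 *
 * A worked example may help.  Assume a hypothetical 64-byte ring buffer
 * with offs = 56, woffs = 8 and a reservation of total = 16.  Because
 * 56 + 16 exceeds the buffer size, we must wrap; because woffs (8) is
 * less than offs (56), a previous reservation already induced this wrap
 * and woffs is left alone.  The tail of the buffer is zeroed, offs
 * becomes 0, and the loop below then advances woffs past old records
 * until at least 16 bytes are free.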
9104 */ 9105 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 9106 if (woffs >= offs) 9107 woffs = 0; 9108 } else { 9109 woffs = 0; 9110 } 9111 9112 /* 9113 * Now we know that we're going to be storing to the 9114 * top of the buffer and that there is room for us 9115 * there. We need to clear the buffer from the current 9116 * offset to the end (there may be old gunk there). 9117 */ 9118 while (offs < buf->dtb_size) 9119 tomax[offs++] = 0; 9120 9121 /* 9122 * We need to set our offset to zero. And because we 9123 * are wrapping, we need to set the bit indicating as 9124 * much. We can also adjust our needed space back 9125 * down to the space required by the ECB -- we know 9126 * that the top of the buffer is aligned. 9127 */ 9128 offs = 0; 9129 total = needed; 9130 buf->dtb_flags |= DTRACEBUF_WRAPPED; 9131 } else { 9132 /* 9133 * There is room for us in the buffer, so we simply 9134 * need to check the wrapped offset. 9135 */ 9136 if (woffs < offs) { 9137 /* 9138 * The wrapped offset is less than the offset. 9139 * This can happen if we allocated buffer space 9140 * that induced a wrap, but then we didn't 9141 * subsequently take the space due to an error 9142 * or false predicate evaluation. This is 9143 * okay; we know that _this_ allocation isn't 9144 * going to induce a wrap. We still can't 9145 * reset the wrapped offset to be zero, 9146 * however: the space may have been trashed in 9147 * the previous failed probe attempt. But at 9148 * least the wrapped offset doesn't need to 9149 * be adjusted at all... 9150 */ 9151 goto out; 9152 } 9153 } 9154 9155 while (offs + total > woffs) { 9156 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 9157 size_t size; 9158 9159 if (epid == DTRACE_EPIDNONE) { 9160 size = sizeof (uint32_t); 9161 } else { 9162 ASSERT(epid <= state->dts_necbs); 9163 ASSERT(state->dts_ecbs[epid - 1] != NULL); 9164 9165 size = state->dts_ecbs[epid - 1]->dte_size; 9166 } 9167 9168 ASSERT(woffs + size <= buf->dtb_size); 9169 ASSERT(size != 0); 9170 9171 if (woffs + size == buf->dtb_size) { 9172 /* 9173 * We've reached the end of the buffer; we want 9174 * to set the wrapped offset to 0 and break 9175 * out. However, if the offs is 0, then we're 9176 * in a strange edge-condition: the amount of 9177 * space that we want to reserve plus the size 9178 * of the record that we're overwriting is 9179 * greater than the size of the buffer. This 9180 * is problematic because if we reserve the 9181 * space but subsequently don't consume it (due 9182 * to a failed predicate or error) the wrapped 9183 * offset will be 0 -- yet the EPID at offset 0 9184 * will not be committed. This situation is 9185 * relatively easy to deal with: if we're in 9186 * this case, the buffer is indistinguishable 9187 * from one that hasn't wrapped; we need only 9188 * finish the job by clearing the wrapped bit, 9189 * explicitly setting the offset to be 0, and 9190 * zero'ing out the old data in the buffer. 9191 */ 9192 if (offs == 0) { 9193 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9194 buf->dtb_offset = 0; 9195 woffs = total; 9196 9197 while (woffs < buf->dtb_size) 9198 tomax[woffs++] = 0; 9199 } 9200 9201 woffs = 0; 9202 break; 9203 } 9204 9205 woffs += size; 9206 } 9207 9208 /* 9209 * We have a wrapped offset. It may be that the wrapped offset 9210 * has become zero -- that's okay. 9211 */ 9212 buf->dtb_xamot_offset = woffs; 9213 } 9214 9215 out: 9216 /* 9217 * Now we can plow the buffer with any necessary padding. 
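 *
 * Padding is written as full 32-bit DTRACE_EPIDNONE words so that a
 * consumer walking the buffer sees well-formed "no enabling" records
 * rather than uninterpretable bytes.  For example, with hypothetical
 * values offs = 20 and align = 8, the loop below stores one uint32_t of
 * DTRACE_EPIDNONE at offset 20, leaving offs = 24; the assertion holds
 * because the 4-byte shortfall is itself uint32_t-aligned.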
9218 */ 9219 while (offs & (align - 1)) { 9220 /* 9221 * Assert that our alignment is off by a number which 9222 * is itself sizeof (uint32_t) aligned. 9223 */ 9224 ASSERT(!((align - (offs & (align - 1))) & 9225 (sizeof (uint32_t) - 1))); 9226 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9227 offs += sizeof (uint32_t); 9228 } 9229 9230 if (buf->dtb_flags & DTRACEBUF_FILL) { 9231 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9232 buf->dtb_flags |= DTRACEBUF_FULL; 9233 return (-1); 9234 } 9235 } 9236 9237 if (mstate == NULL) 9238 return (offs); 9239 9240 /* 9241 * For ring buffers and fill buffers, the scratch space is always 9242 * the inactive buffer. 9243 */ 9244 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9245 mstate->dtms_scratch_size = buf->dtb_size; 9246 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9247 9248 return (offs); 9249 } 9250 9251 static void 9252 dtrace_buffer_polish(dtrace_buffer_t *buf) 9253 { 9254 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9255 ASSERT(MUTEX_HELD(&dtrace_lock)); 9256 9257 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9258 return; 9259 9260 /* 9261 * We need to polish the ring buffer. There are three cases: 9262 * 9263 * - The first (and presumably most common) is that there is no gap 9264 * between the buffer offset and the wrapped offset. In this case, 9265 * there is nothing in the buffer that isn't valid data; we can 9266 * mark the buffer as polished and return. 9267 * 9268 * - The second (less common than the first but still more common 9269 * than the third) is that there is a gap between the buffer offset 9270 * and the wrapped offset, and the wrapped offset is larger than the 9271 * buffer offset. This can happen because of an alignment issue, or 9272 * can happen because of a call to dtrace_buffer_reserve() that 9273 * didn't subsequently consume the buffer space. In this case, 9274 * we need to zero the data from the buffer offset to the wrapped 9275 * offset. 9276 * 9277 * - The third (and least common) is that there is a gap between the 9278 * buffer offset and the wrapped offset, but the wrapped offset is 9279 * _less_ than the buffer offset. This can only happen because a 9280 * call to dtrace_buffer_reserve() induced a wrap, but the space 9281 * was not subsequently consumed. In this case, we need to zero the 9282 * space from the offset to the end of the buffer _and_ from the 9283 * top of the buffer to the wrapped offset. 
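 *
 * Concretely, with a hypothetical 64-byte buffer:  in the first case,
 * dtb_offset == dtb_xamot_offset == 24 and nothing need be done; in the
 * second, dtb_offset = 16 and dtb_xamot_offset = 24, so the gap
 * [16, 24) is zeroed; in the third, dtb_offset = 56 and
 * dtb_xamot_offset = 8, so both [56, 64) and [0, 8) are zeroed -- which
 * is precisely what the code below does.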
9284 */ 9285 if (buf->dtb_offset < buf->dtb_xamot_offset) { 9286 bzero(buf->dtb_tomax + buf->dtb_offset, 9287 buf->dtb_xamot_offset - buf->dtb_offset); 9288 } 9289 9290 if (buf->dtb_offset > buf->dtb_xamot_offset) { 9291 bzero(buf->dtb_tomax + buf->dtb_offset, 9292 buf->dtb_size - buf->dtb_offset); 9293 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 9294 } 9295 } 9296 9297 static void 9298 dtrace_buffer_free(dtrace_buffer_t *bufs) 9299 { 9300 int i; 9301 9302 for (i = 0; i < NCPU; i++) { 9303 dtrace_buffer_t *buf = &bufs[i]; 9304 9305 if (buf->dtb_tomax == NULL) { 9306 ASSERT(buf->dtb_xamot == NULL); 9307 ASSERT(buf->dtb_size == 0); 9308 continue; 9309 } 9310 9311 if (buf->dtb_xamot != NULL) { 9312 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9313 kmem_free(buf->dtb_xamot, buf->dtb_size); 9314 } 9315 9316 kmem_free(buf->dtb_tomax, buf->dtb_size); 9317 buf->dtb_size = 0; 9318 buf->dtb_tomax = NULL; 9319 buf->dtb_xamot = NULL; 9320 } 9321 } 9322 9323 /* 9324 * DTrace Enabling Functions 9325 */ 9326 static dtrace_enabling_t * 9327 dtrace_enabling_create(dtrace_vstate_t *vstate) 9328 { 9329 dtrace_enabling_t *enab; 9330 9331 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 9332 enab->dten_vstate = vstate; 9333 9334 return (enab); 9335 } 9336 9337 static void 9338 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 9339 { 9340 dtrace_ecbdesc_t **ndesc; 9341 size_t osize, nsize; 9342 9343 /* 9344 * We can't add to enablings after we've enabled them, or after we've 9345 * retained them. 9346 */ 9347 ASSERT(enab->dten_probegen == 0); 9348 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9349 9350 if (enab->dten_ndesc < enab->dten_maxdesc) { 9351 enab->dten_desc[enab->dten_ndesc++] = ecb; 9352 return; 9353 } 9354 9355 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9356 9357 if (enab->dten_maxdesc == 0) { 9358 enab->dten_maxdesc = 1; 9359 } else { 9360 enab->dten_maxdesc <<= 1; 9361 } 9362 9363 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 9364 9365 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9366 ndesc = kmem_zalloc(nsize, KM_SLEEP); 9367 bcopy(enab->dten_desc, ndesc, osize); 9368 kmem_free(enab->dten_desc, osize); 9369 9370 enab->dten_desc = ndesc; 9371 enab->dten_desc[enab->dten_ndesc++] = ecb; 9372 } 9373 9374 static void 9375 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 9376 dtrace_probedesc_t *pd) 9377 { 9378 dtrace_ecbdesc_t *new; 9379 dtrace_predicate_t *pred; 9380 dtrace_actdesc_t *act; 9381 9382 /* 9383 * We're going to create a new ECB description that matches the 9384 * specified ECB in every way, but has the specified probe description. 
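 *
 * A sketch of the intended use, as in dtrace_enabling_replicate() below:
 * given a retained description ep that matched one probe, a sibling
 * enabling can be minted for another probe description without copying
 * the actions (the probe names here are hypothetical):
 *
 *	dtrace_probedesc_t pd;
 *
 *	bzero(&pd, sizeof (pd));
 *	(void) strcpy(pd.dtpd_provider, "syscall");
 *	(void) strcpy(pd.dtpd_name, "return");
 *	dtrace_enabling_addlike(enab, ep, &pd);
 *
 * The predicate and action descriptions are shared by reference, which
 * is why each is explicitly held below.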
9385 */ 9386 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 9387 9388 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 9389 dtrace_predicate_hold(pred); 9390 9391 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 9392 dtrace_actdesc_hold(act); 9393 9394 new->dted_action = ecb->dted_action; 9395 new->dted_pred = ecb->dted_pred; 9396 new->dted_probe = *pd; 9397 new->dted_uarg = ecb->dted_uarg; 9398 9399 dtrace_enabling_add(enab, new); 9400 } 9401 9402 static void 9403 dtrace_enabling_dump(dtrace_enabling_t *enab) 9404 { 9405 int i; 9406 9407 for (i = 0; i < enab->dten_ndesc; i++) { 9408 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 9409 9410 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 9411 desc->dtpd_provider, desc->dtpd_mod, 9412 desc->dtpd_func, desc->dtpd_name); 9413 } 9414 } 9415 9416 static void 9417 dtrace_enabling_destroy(dtrace_enabling_t *enab) 9418 { 9419 int i; 9420 dtrace_ecbdesc_t *ep; 9421 dtrace_vstate_t *vstate = enab->dten_vstate; 9422 9423 ASSERT(MUTEX_HELD(&dtrace_lock)); 9424 9425 for (i = 0; i < enab->dten_ndesc; i++) { 9426 dtrace_actdesc_t *act, *next; 9427 dtrace_predicate_t *pred; 9428 9429 ep = enab->dten_desc[i]; 9430 9431 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 9432 dtrace_predicate_release(pred, vstate); 9433 9434 for (act = ep->dted_action; act != NULL; act = next) { 9435 next = act->dtad_next; 9436 dtrace_actdesc_release(act, vstate); 9437 } 9438 9439 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 9440 } 9441 9442 kmem_free(enab->dten_desc, 9443 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 9444 9445 /* 9446 * If this was a retained enabling, decrement the dts_nretained count 9447 * and take it off of the dtrace_retained list. 9448 */ 9449 if (enab->dten_prev != NULL || enab->dten_next != NULL || 9450 dtrace_retained == enab) { 9451 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9452 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 9453 enab->dten_vstate->dtvs_state->dts_nretained--; 9454 } 9455 9456 if (enab->dten_prev == NULL) { 9457 if (dtrace_retained == enab) { 9458 dtrace_retained = enab->dten_next; 9459 9460 if (dtrace_retained != NULL) 9461 dtrace_retained->dten_prev = NULL; 9462 } 9463 } else { 9464 ASSERT(enab != dtrace_retained); 9465 ASSERT(dtrace_retained != NULL); 9466 enab->dten_prev->dten_next = enab->dten_next; 9467 } 9468 9469 if (enab->dten_next != NULL) { 9470 ASSERT(dtrace_retained != NULL); 9471 enab->dten_next->dten_prev = enab->dten_prev; 9472 } 9473 9474 kmem_free(enab, sizeof (dtrace_enabling_t)); 9475 } 9476 9477 static int 9478 dtrace_enabling_retain(dtrace_enabling_t *enab) 9479 { 9480 dtrace_state_t *state; 9481 9482 ASSERT(MUTEX_HELD(&dtrace_lock)); 9483 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9484 ASSERT(enab->dten_vstate != NULL); 9485 9486 state = enab->dten_vstate->dtvs_state; 9487 ASSERT(state != NULL); 9488 9489 /* 9490 * We only allow each state to retain dtrace_retain_max enablings. 
9491 */ 9492 if (state->dts_nretained >= dtrace_retain_max) 9493 return (ENOSPC); 9494 9495 state->dts_nretained++; 9496 9497 if (dtrace_retained == NULL) { 9498 dtrace_retained = enab; 9499 return (0); 9500 } 9501 9502 enab->dten_next = dtrace_retained; 9503 dtrace_retained->dten_prev = enab; 9504 dtrace_retained = enab; 9505 9506 return (0); 9507 } 9508 9509 static int 9510 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 9511 dtrace_probedesc_t *create) 9512 { 9513 dtrace_enabling_t *new, *enab; 9514 int found = 0, err = ENOENT; 9515 9516 ASSERT(MUTEX_HELD(&dtrace_lock)); 9517 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 9518 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 9519 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 9520 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 9521 9522 new = dtrace_enabling_create(&state->dts_vstate); 9523 9524 /* 9525 * Iterate over all retained enablings, looking for enablings that 9526 * match the specified state. 9527 */ 9528 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9529 int i; 9530 9531 /* 9532 * dtvs_state can only be NULL for helper enablings -- and 9533 * helper enablings can't be retained. 9534 */ 9535 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9536 9537 if (enab->dten_vstate->dtvs_state != state) 9538 continue; 9539 9540 /* 9541 * Now iterate over each probe description; we're looking for 9542 * an exact match to the specified probe description. 9543 */ 9544 for (i = 0; i < enab->dten_ndesc; i++) { 9545 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9546 dtrace_probedesc_t *pd = &ep->dted_probe; 9547 9548 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 9549 continue; 9550 9551 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 9552 continue; 9553 9554 if (strcmp(pd->dtpd_func, match->dtpd_func)) 9555 continue; 9556 9557 if (strcmp(pd->dtpd_name, match->dtpd_name)) 9558 continue; 9559 9560 /* 9561 * We have a winning probe! Add it to our growing 9562 * enabling. 9563 */ 9564 found = 1; 9565 dtrace_enabling_addlike(new, ep, create); 9566 } 9567 } 9568 9569 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 9570 dtrace_enabling_destroy(new); 9571 return (err); 9572 } 9573 9574 return (0); 9575 } 9576 9577 static void 9578 dtrace_enabling_retract(dtrace_state_t *state) 9579 { 9580 dtrace_enabling_t *enab, *next; 9581 9582 ASSERT(MUTEX_HELD(&dtrace_lock)); 9583 9584 /* 9585 * Iterate over all retained enablings, destroy the enablings retained 9586 * for the specified state. 9587 */ 9588 for (enab = dtrace_retained; enab != NULL; enab = next) { 9589 next = enab->dten_next; 9590 9591 /* 9592 * dtvs_state can only be NULL for helper enablings -- and 9593 * helper enablings can't be retained. 
9594 */ 9595 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9596 9597 if (enab->dten_vstate->dtvs_state == state) { 9598 ASSERT(state->dts_nretained > 0); 9599 dtrace_enabling_destroy(enab); 9600 } 9601 } 9602 9603 ASSERT(state->dts_nretained == 0); 9604 } 9605 9606 static int 9607 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 9608 { 9609 int i = 0; 9610 int matched = 0; 9611 9612 ASSERT(MUTEX_HELD(&cpu_lock)); 9613 ASSERT(MUTEX_HELD(&dtrace_lock)); 9614 9615 for (i = 0; i < enab->dten_ndesc; i++) { 9616 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9617 9618 enab->dten_current = ep; 9619 enab->dten_error = 0; 9620 9621 matched += dtrace_probe_enable(&ep->dted_probe, enab); 9622 9623 if (enab->dten_error != 0) { 9624 /* 9625 * If we get an error half-way through enabling the 9626 * probes, we kick out -- perhaps with some number of 9627 * them enabled. Leaving enabled probes enabled may 9628 * be slightly confusing for user-level, but we expect 9629 * that no one will attempt to actually drive on in 9630 * the face of such errors. If this is an anonymous 9631 * enabling (indicated with a NULL nmatched pointer), 9632 * we cmn_err() a message. We aren't expecting to 9633 * get such an error -- insofar as such an error can 9634 * exist at all, it would be a result of corrupted DOF 9635 * in the driver properties. 9636 */ 9637 if (nmatched == NULL) { 9638 cmn_err(CE_WARN, "dtrace_enabling_match() " 9639 "error on %p: %d", (void *)ep, 9640 enab->dten_error); 9641 } 9642 9643 return (enab->dten_error); 9644 } 9645 } 9646 9647 enab->dten_probegen = dtrace_probegen; 9648 if (nmatched != NULL) 9649 *nmatched = matched; 9650 9651 return (0); 9652 } 9653 9654 static void 9655 dtrace_enabling_matchall(void) 9656 { 9657 dtrace_enabling_t *enab; 9658 9659 mutex_enter(&cpu_lock); 9660 mutex_enter(&dtrace_lock); 9661 9662 /* 9663 * Because we can be called after dtrace_detach() has been called, we 9664 * cannot assert that there are retained enablings. We can safely 9665 * load from dtrace_retained, however: the taskq_destroy() at the 9666 * end of dtrace_detach() will block pending our completion. 9667 */ 9668 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 9669 (void) dtrace_enabling_match(enab, NULL); 9670 9671 mutex_exit(&dtrace_lock); 9672 mutex_exit(&cpu_lock); 9673 } 9674 9675 static int 9676 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 9677 { 9678 dtrace_enabling_t *enab; 9679 int matched, total = 0, err; 9680 9681 ASSERT(MUTEX_HELD(&cpu_lock)); 9682 ASSERT(MUTEX_HELD(&dtrace_lock)); 9683 9684 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9685 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9686 9687 if (enab->dten_vstate->dtvs_state != state) 9688 continue; 9689 9690 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 9691 return (err); 9692 9693 total += matched; 9694 } 9695 9696 if (nmatched != NULL) 9697 *nmatched = total; 9698 9699 return (0); 9700 } 9701 9702 /* 9703 * If an enabling is to be enabled without having matched probes (that is, if 9704 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 9705 * enabling must be _primed_ by creating an ECB for every ECB description. 9706 * This must be done to assure that we know the number of speculations, the 9707 * number of aggregations, the minimum buffer size needed, etc. before we 9708 * transition out of DTRACE_ACTIVITY_INACTIVE.
To do this without actually 9709 * enabling any probes, we create ECBs for every ECB description, but with a 9710 * NULL probe -- which is exactly what this function does. 9711 */ 9712 static void 9713 dtrace_enabling_prime(dtrace_state_t *state) 9714 { 9715 dtrace_enabling_t *enab; 9716 int i; 9717 9718 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9719 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9720 9721 if (enab->dten_vstate->dtvs_state != state) 9722 continue; 9723 9724 /* 9725 * We don't want to prime an enabling more than once, lest 9726 * we allow a malicious user to induce resource exhaustion. 9727 * (The ECBs that result from priming an enabling aren't 9728 * leaked -- but they also aren't deallocated until the 9729 * consumer state is destroyed.) 9730 */ 9731 if (enab->dten_primed) 9732 continue; 9733 9734 for (i = 0; i < enab->dten_ndesc; i++) { 9735 enab->dten_current = enab->dten_desc[i]; 9736 (void) dtrace_probe_enable(NULL, enab); 9737 } 9738 9739 enab->dten_primed = 1; 9740 } 9741 } 9742 9743 /* 9744 * Called to indicate that probes should be provided due to retained 9745 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 9746 * must take an initial lap through the enabling calling the dtps_provide() 9747 * entry point explicitly to allow for autocreated probes. 9748 */ 9749 static void 9750 dtrace_enabling_provide(dtrace_provider_t *prv) 9751 { 9752 int i, all = 0; 9753 dtrace_probedesc_t desc; 9754 9755 ASSERT(MUTEX_HELD(&dtrace_lock)); 9756 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9757 9758 if (prv == NULL) { 9759 all = 1; 9760 prv = dtrace_provider; 9761 } 9762 9763 do { 9764 dtrace_enabling_t *enab = dtrace_retained; 9765 void *parg = prv->dtpv_arg; 9766 9767 for (; enab != NULL; enab = enab->dten_next) { 9768 for (i = 0; i < enab->dten_ndesc; i++) { 9769 desc = enab->dten_desc[i]->dted_probe; 9770 mutex_exit(&dtrace_lock); 9771 prv->dtpv_pops.dtps_provide(parg, &desc); 9772 mutex_enter(&dtrace_lock); 9773 } 9774 } 9775 } while (all && (prv = prv->dtpv_next) != NULL); 9776 9777 mutex_exit(&dtrace_lock); 9778 dtrace_probe_provide(NULL, all ? NULL : prv); 9779 mutex_enter(&dtrace_lock); 9780 } 9781 9782 /* 9783 * DTrace DOF Functions 9784 */ 9785 /*ARGSUSED*/ 9786 static void 9787 dtrace_dof_error(dof_hdr_t *dof, const char *str) 9788 { 9789 if (dtrace_err_verbose) 9790 cmn_err(CE_WARN, "failed to process DOF: %s", str); 9791 9792 #ifdef DTRACE_ERRDEBUG 9793 dtrace_errdebug(str); 9794 #endif 9795 } 9796 9797 /* 9798 * Create DOF out of a currently enabled state. Right now, we only create 9799 * DOF containing the run-time options -- but this could be expanded to create 9800 * complete DOF representing the enabled state.
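 *
 * The generated image is laid out as a header, a single section header,
 * and the option array that it describes:
 *
 *	+----------------+  offset 0
 *	| dof_hdr_t      |
 *	+----------------+  offset sizeof (dof_hdr_t)
 *	| dof_sec_t      |  (DOF_SECT_OPTDESC)
 *	+----------------+  rounded up to sizeof (uint64_t)
 *	| dof_optdesc_t  |  (DTRACEOPT_MAX entries)
 *	|      ...       |
 *	+----------------+  offset len
 *
 * This matches the computation of len in the function below.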
9801 */ 9802 static dof_hdr_t * 9803 dtrace_dof_create(dtrace_state_t *state) 9804 { 9805 dof_hdr_t *dof; 9806 dof_sec_t *sec; 9807 dof_optdesc_t *opt; 9808 int i, len = sizeof (dof_hdr_t) + 9809 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 9810 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 9811 9812 ASSERT(MUTEX_HELD(&dtrace_lock)); 9813 9814 dof = kmem_zalloc(len, KM_SLEEP); 9815 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 9816 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 9817 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 9818 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 9819 9820 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 9821 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 9822 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION_1; 9823 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 9824 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 9825 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 9826 9827 dof->dofh_flags = 0; 9828 dof->dofh_hdrsize = sizeof (dof_hdr_t); 9829 dof->dofh_secsize = sizeof (dof_sec_t); 9830 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 9831 dof->dofh_secoff = sizeof (dof_hdr_t); 9832 dof->dofh_loadsz = len; 9833 dof->dofh_filesz = len; 9834 dof->dofh_pad = 0; 9835 9836 /* 9837 * Fill in the option section header... 9838 */ 9839 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 9840 sec->dofs_type = DOF_SECT_OPTDESC; 9841 sec->dofs_align = sizeof (uint64_t); 9842 sec->dofs_flags = DOF_SECF_LOAD; 9843 sec->dofs_entsize = sizeof (dof_optdesc_t); 9844 9845 opt = (dof_optdesc_t *)((uintptr_t)sec + 9846 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 9847 9848 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 9849 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 9850 9851 for (i = 0; i < DTRACEOPT_MAX; i++) { 9852 opt[i].dofo_option = i; 9853 opt[i].dofo_strtab = DOF_SECIDX_NONE; 9854 opt[i].dofo_value = state->dts_options[i]; 9855 } 9856 9857 return (dof); 9858 } 9859 9860 static dof_hdr_t * 9861 dtrace_dof_copyin(uintptr_t uarg, int *errp) 9862 { 9863 dof_hdr_t hdr, *dof; 9864 9865 ASSERT(!MUTEX_HELD(&dtrace_lock)); 9866 9867 /* 9868 * First, we're going to copyin() the sizeof (dof_hdr_t). 9869 */ 9870 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 9871 dtrace_dof_error(NULL, "failed to copyin DOF header"); 9872 *errp = EFAULT; 9873 return (NULL); 9874 } 9875 9876 /* 9877 * Now we'll allocate the entire DOF and copy it in -- provided 9878 * that the length isn't outrageous. 9879 */ 9880 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 9881 dtrace_dof_error(&hdr, "load size exceeds maximum"); 9882 *errp = E2BIG; 9883 return (NULL); 9884 } 9885 9886 if (hdr.dofh_loadsz < sizeof (hdr)) { 9887 dtrace_dof_error(&hdr, "invalid load size"); 9888 *errp = EINVAL; 9889 return (NULL); 9890 } 9891 9892 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 9893 9894 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 9895 kmem_free(dof, hdr.dofh_loadsz); 9896 *errp = EFAULT; 9897 return (NULL); 9898 } 9899 9900 return (dof); 9901 } 9902 9903 static dof_hdr_t * 9904 dtrace_dof_property(const char *name) 9905 { 9906 uchar_t *buf; 9907 uint64_t loadsz; 9908 unsigned int len, i; 9909 dof_hdr_t *dof; 9910 9911 /* 9912 * Unfortunately, arrays of values in .conf files are always (and 9913 * only) interpreted to be integer arrays. We must read our DOF 9914 * as an integer array, and then squeeze it into a byte array.
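 *
 * The squeeze narrows each 32-bit property element to its low byte, in
 * place.  For example, a DOF image beginning with the magic bytes 0x7f
 * 'D' 'O' 'F' arrives from the property as four ints -- { 0x7f, 0x44,
 * 0x4f, 0x46 } -- occupying sixteen bytes; after the loop below, the
 * first four bytes of buf hold the original byte stream.  The in-place
 * narrowing is safe because element i is read from offset 4 * i before
 * byte i is written, and each value is known to fit in a byte.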
9915 */ 9916 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 9917 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 9918 return (NULL); 9919 9920 for (i = 0; i < len; i++) 9921 buf[i] = (uchar_t)(((int *)buf)[i]); 9922 9923 if (len < sizeof (dof_hdr_t)) { 9924 ddi_prop_free(buf); 9925 dtrace_dof_error(NULL, "truncated header"); 9926 return (NULL); 9927 } 9928 9929 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 9930 ddi_prop_free(buf); 9931 dtrace_dof_error(NULL, "truncated DOF"); 9932 return (NULL); 9933 } 9934 9935 if (loadsz >= dtrace_dof_maxsize) { 9936 ddi_prop_free(buf); 9937 dtrace_dof_error(NULL, "oversized DOF"); 9938 return (NULL); 9939 } 9940 9941 dof = kmem_alloc(loadsz, KM_SLEEP); 9942 bcopy(buf, dof, loadsz); 9943 ddi_prop_free(buf); 9944 9945 return (dof); 9946 } 9947 9948 static void 9949 dtrace_dof_destroy(dof_hdr_t *dof) 9950 { 9951 kmem_free(dof, dof->dofh_loadsz); 9952 } 9953 9954 /* 9955 * Return the dof_sec_t pointer corresponding to a given section index. If the 9956 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 9957 * a type other than DOF_SECT_NONE is specified, the header is checked against 9958 * this type and NULL is returned if the types do not match. 9959 */ 9960 static dof_sec_t * 9961 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 9962 { 9963 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 9964 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 9965 9966 if (i >= dof->dofh_secnum) { 9967 dtrace_dof_error(dof, "referenced section index is invalid"); 9968 return (NULL); 9969 } 9970 9971 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 9972 dtrace_dof_error(dof, "referenced section is not loadable"); 9973 return (NULL); 9974 } 9975 9976 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 9977 dtrace_dof_error(dof, "referenced section is the wrong type"); 9978 return (NULL); 9979 } 9980 9981 return (sec); 9982 } 9983 9984 static dtrace_probedesc_t * 9985 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 9986 { 9987 dof_probedesc_t *probe; 9988 dof_sec_t *strtab; 9989 uintptr_t daddr = (uintptr_t)dof; 9990 uintptr_t str; 9991 size_t size; 9992 9993 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 9994 dtrace_dof_error(dof, "invalid probe section"); 9995 return (NULL); 9996 } 9997 9998 if (sec->dofs_align != sizeof (dof_secidx_t)) { 9999 dtrace_dof_error(dof, "bad alignment in probe description"); 10000 return (NULL); 10001 } 10002 10003 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10004 dtrace_dof_error(dof, "truncated probe description"); 10005 return (NULL); 10006 } 10007 10008 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10009 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10010 10011 if (strtab == NULL) 10012 return (NULL); 10013 10014 str = daddr + strtab->dofs_offset; 10015 size = strtab->dofs_size; 10016 10017 if (probe->dofp_provider >= strtab->dofs_size) { 10018 dtrace_dof_error(dof, "corrupt probe provider"); 10019 return (NULL); 10020 } 10021 10022 (void) strncpy(desc->dtpd_provider, 10023 (char *)(str + probe->dofp_provider), 10024 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10025 10026 if (probe->dofp_mod >= strtab->dofs_size) { 10027 dtrace_dof_error(dof, "corrupt probe module"); 10028 return (NULL); 10029 } 10030 10031 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10032 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10033 10034 if (probe->dofp_func 
>= strtab->dofs_size) { 10035 dtrace_dof_error(dof, "corrupt probe function"); 10036 return (NULL); 10037 } 10038 10039 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10040 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10041 10042 if (probe->dofp_name >= strtab->dofs_size) { 10043 dtrace_dof_error(dof, "corrupt probe name"); 10044 return (NULL); 10045 } 10046 10047 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10048 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10049 10050 return (desc); 10051 } 10052 10053 static dtrace_difo_t * 10054 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10055 cred_t *cr) 10056 { 10057 dtrace_difo_t *dp; 10058 size_t ttl = 0; 10059 dof_difohdr_t *dofd; 10060 uintptr_t daddr = (uintptr_t)dof; 10061 size_t max = dtrace_difo_maxsize; 10062 int i, l, n; 10063 10064 static const struct { 10065 int section; 10066 int bufoffs; 10067 int lenoffs; 10068 int entsize; 10069 int align; 10070 const char *msg; 10071 } difo[] = { 10072 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 10073 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 10074 sizeof (dif_instr_t), "multiple DIF sections" }, 10075 10076 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 10077 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 10078 sizeof (uint64_t), "multiple integer tables" }, 10079 10080 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 10081 offsetof(dtrace_difo_t, dtdo_strlen), 0, 10082 sizeof (char), "multiple string tables" }, 10083 10084 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 10085 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 10086 sizeof (uint_t), "multiple variable tables" }, 10087 10088 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 10089 }; 10090 10091 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 10092 dtrace_dof_error(dof, "invalid DIFO header section"); 10093 return (NULL); 10094 } 10095 10096 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10097 dtrace_dof_error(dof, "bad alignment in DIFO header"); 10098 return (NULL); 10099 } 10100 10101 if (sec->dofs_size < sizeof (dof_difohdr_t) || 10102 sec->dofs_size % sizeof (dof_secidx_t)) { 10103 dtrace_dof_error(dof, "bad size in DIFO header"); 10104 return (NULL); 10105 } 10106 10107 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10108 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 10109 10110 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10111 dp->dtdo_rtype = dofd->dofd_rtype; 10112 10113 for (l = 0; l < n; l++) { 10114 dof_sec_t *subsec; 10115 void **bufp; 10116 uint32_t *lenp; 10117 10118 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 10119 dofd->dofd_links[l])) == NULL) 10120 goto err; /* invalid section link */ 10121 10122 if (ttl + subsec->dofs_size > max) { 10123 dtrace_dof_error(dof, "exceeds maximum size"); 10124 goto err; 10125 } 10126 10127 ttl += subsec->dofs_size; 10128 10129 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 10130 if (subsec->dofs_type != difo[i].section) 10131 continue; 10132 10133 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 10134 dtrace_dof_error(dof, "section not loaded"); 10135 goto err; 10136 } 10137 10138 if (subsec->dofs_align != difo[i].align) { 10139 dtrace_dof_error(dof, "bad alignment"); 10140 goto err; 10141 } 10142 10143 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 10144 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 10145 10146 if (*bufp != NULL) { 10147 dtrace_dof_error(dof, difo[i].msg); 10148 goto err;
10149 } 10150 10151 if (difo[i].entsize != subsec->dofs_entsize) { 10152 dtrace_dof_error(dof, "entry size mismatch"); 10153 goto err; 10154 } 10155 10156 if (subsec->dofs_entsize != 0 && 10157 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 10158 dtrace_dof_error(dof, "corrupt entry size"); 10159 goto err; 10160 } 10161 10162 *lenp = subsec->dofs_size; 10163 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 10164 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 10165 *bufp, subsec->dofs_size); 10166 10167 if (subsec->dofs_entsize != 0) 10168 *lenp /= subsec->dofs_entsize; 10169 10170 break; 10171 } 10172 10173 /* 10174 * If we encounter a loadable DIFO sub-section that is not 10175 * known to us, assume this is a broken program and fail. 10176 */ 10177 if (difo[i].section == DOF_SECT_NONE && 10178 (subsec->dofs_flags & DOF_SECF_LOAD)) { 10179 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 10180 goto err; 10181 } 10182 } 10183 10184 if (dp->dtdo_buf == NULL) { 10185 /* 10186 * We can't have a DIF object without DIF text. 10187 */ 10188 dtrace_dof_error(dof, "missing DIF text"); 10189 goto err; 10190 } 10191 10192 /* 10193 * Before we validate the DIF object, run through the variable table 10194 * looking for the strings -- if any of their size are under, we'll set 10195 * their size to be the system-wide default string size. Note that 10196 * this should _not_ happen if the "strsize" option has been set -- 10197 * in this case, the compiler should have set the size to reflect the 10198 * setting of the option. 10199 */ 10200 for (i = 0; i < dp->dtdo_varlen; i++) { 10201 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10202 dtrace_diftype_t *t = &v->dtdv_type; 10203 10204 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10205 continue; 10206 10207 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10208 t->dtdt_size = dtrace_strsize_default; 10209 } 10210 10211 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10212 goto err; 10213 10214 dtrace_difo_init(dp, vstate); 10215 return (dp); 10216 10217 err: 10218 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10219 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10220 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10221 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10222 10223 kmem_free(dp, sizeof (dtrace_difo_t)); 10224 return (NULL); 10225 } 10226 10227 static dtrace_predicate_t * 10228 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10229 cred_t *cr) 10230 { 10231 dtrace_difo_t *dp; 10232 10233 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10234 return (NULL); 10235 10236 return (dtrace_predicate_create(dp)); 10237 } 10238 10239 static dtrace_actdesc_t * 10240 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10241 cred_t *cr) 10242 { 10243 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10244 dof_actdesc_t *desc; 10245 dof_sec_t *difosec; 10246 size_t offs; 10247 uintptr_t daddr = (uintptr_t)dof; 10248 uint64_t arg; 10249 dtrace_actkind_t kind; 10250 10251 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10252 dtrace_dof_error(dof, "invalid action section"); 10253 return (NULL); 10254 } 10255 10256 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10257 dtrace_dof_error(dof, "truncated action description"); 10258 return (NULL); 10259 } 10260 10261 if (sec->dofs_align != sizeof (uint64_t)) { 10262 dtrace_dof_error(dof, "bad alignment in action description"); 10263 return (NULL); 10264 
} 10265 10266 if (sec->dofs_size < sec->dofs_entsize) { 10267 dtrace_dof_error(dof, "section entry size exceeds total size"); 10268 return (NULL); 10269 } 10270 10271 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 10272 dtrace_dof_error(dof, "bad entry size in action description"); 10273 return (NULL); 10274 } 10275 10276 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10277 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10278 return (NULL); 10279 } 10280 10281 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 10282 desc = (dof_actdesc_t *)(daddr + 10283 (uintptr_t)sec->dofs_offset + offs); 10284 kind = (dtrace_actkind_t)desc->dofa_kind; 10285 10286 if (DTRACEACT_ISPRINTFLIKE(kind) && 10287 (kind != DTRACEACT_PRINTA || 10288 desc->dofa_strtab != DOF_SECIDX_NONE)) { 10289 dof_sec_t *strtab; 10290 char *str, *fmt; 10291 uint64_t i; 10292 10293 /* 10294 * printf()-like actions must have a format string. 10295 */ 10296 if ((strtab = dtrace_dof_sect(dof, 10297 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 10298 goto err; 10299 10300 str = (char *)((uintptr_t)dof + 10301 (uintptr_t)strtab->dofs_offset); 10302 10303 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 10304 if (str[i] == '\0') 10305 break; 10306 } 10307 10308 if (i >= strtab->dofs_size) { 10309 dtrace_dof_error(dof, "bogus format string"); 10310 goto err; 10311 } 10312 10313 if (i == desc->dofa_arg) { 10314 dtrace_dof_error(dof, "empty format string"); 10315 goto err; 10316 } 10317 10318 i -= desc->dofa_arg; 10319 fmt = kmem_alloc(i + 1, KM_SLEEP); 10320 bcopy(&str[desc->dofa_arg], fmt, i + 1); 10321 arg = (uint64_t)(uintptr_t)fmt; 10322 } else { 10323 if (kind == DTRACEACT_PRINTA) { 10324 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 10325 arg = 0; 10326 } else { 10327 arg = desc->dofa_arg; 10328 } 10329 } 10330 10331 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 10332 desc->dofa_uarg, arg); 10333 10334 if (last != NULL) { 10335 last->dtad_next = act; 10336 } else { 10337 first = act; 10338 } 10339 10340 last = act; 10341 10342 if (desc->dofa_difo == DOF_SECIDX_NONE) 10343 continue; 10344 10345 if ((difosec = dtrace_dof_sect(dof, 10346 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 10347 goto err; 10348 10349 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 10350 10351 if (act->dtad_difo == NULL) 10352 goto err; 10353 } 10354 10355 ASSERT(first != NULL); 10356 return (first); 10357 10358 err: 10359 for (act = first; act != NULL; act = next) { 10360 next = act->dtad_next; 10361 dtrace_actdesc_release(act, vstate); 10362 } 10363 10364 return (NULL); 10365 } 10366 10367 static dtrace_ecbdesc_t * 10368 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10369 cred_t *cr) 10370 { 10371 dtrace_ecbdesc_t *ep; 10372 dof_ecbdesc_t *ecb; 10373 dtrace_probedesc_t *desc; 10374 dtrace_predicate_t *pred = NULL; 10375 10376 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 10377 dtrace_dof_error(dof, "truncated ECB description"); 10378 return (NULL); 10379 } 10380 10381 if (sec->dofs_align != sizeof (uint64_t)) { 10382 dtrace_dof_error(dof, "bad alignment in ECB description"); 10383 return (NULL); 10384 } 10385 10386 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 10387 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 10388 10389 if (sec == NULL) 10390 return (NULL); 10391 10392 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10393 ep->dted_uarg = ecb->dofe_uarg; 10394 desc = &ep->dted_probe; 10395 10396 if 
(dtrace_dof_probedesc(dof, sec, desc) == NULL) 10397 goto err; 10398 10399 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 10400 if ((sec = dtrace_dof_sect(dof, 10401 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 10402 goto err; 10403 10404 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 10405 goto err; 10406 10407 ep->dted_pred.dtpdd_predicate = pred; 10408 } 10409 10410 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 10411 if ((sec = dtrace_dof_sect(dof, 10412 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 10413 goto err; 10414 10415 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 10416 10417 if (ep->dted_action == NULL) 10418 goto err; 10419 } 10420 10421 return (ep); 10422 10423 err: 10424 if (pred != NULL) 10425 dtrace_predicate_release(pred, vstate); 10426 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10427 return (NULL); 10428 } 10429 10430 /* 10431 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 10432 * specified DOF. At present, this amounts to simply adding 'ubase' to the 10433 * site of any user SETX relocations to account for load object base address. 10434 * In the future, if we need other relocations, this function can be extended. 10435 */ 10436 static int 10437 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 10438 { 10439 uintptr_t daddr = (uintptr_t)dof; 10440 dof_relohdr_t *dofr = 10441 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10442 dof_sec_t *ss, *rs, *ts; 10443 dof_relodesc_t *r; 10444 uint_t i, n; 10445 10446 if (sec->dofs_size < sizeof (dof_relohdr_t) || 10447 sec->dofs_align != sizeof (dof_secidx_t)) { 10448 dtrace_dof_error(dof, "invalid relocation header"); 10449 return (-1); 10450 } 10451 10452 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 10453 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 10454 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 10455 10456 if (ss == NULL || rs == NULL || ts == NULL) 10457 return (-1); /* dtrace_dof_error() has been called already */ 10458 10459 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 10460 rs->dofs_align != sizeof (uint64_t)) { 10461 dtrace_dof_error(dof, "invalid relocation section"); 10462 return (-1); 10463 } 10464 10465 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 10466 n = rs->dofs_size / rs->dofs_entsize; 10467 10468 for (i = 0; i < n; i++) { 10469 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 10470 10471 switch (r->dofr_type) { 10472 case DOF_RELO_NONE: 10473 break; 10474 case DOF_RELO_SETX: 10475 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 10476 sizeof (uint64_t) > ts->dofs_size) { 10477 dtrace_dof_error(dof, "bad relocation offset"); 10478 return (-1); 10479 } 10480 10481 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 10482 dtrace_dof_error(dof, "misaligned setx relo"); 10483 return (-1); 10484 } 10485 10486 *(uint64_t *)taddr += ubase; 10487 break; 10488 default: 10489 dtrace_dof_error(dof, "invalid relocation type"); 10490 return (-1); 10491 } 10492 10493 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 10494 } 10495 10496 return (0); 10497 } 10498 10499 /* 10500 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 10501 * header: it should be at the front of a memory region that is at least 10502 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 10503 * size. It need not be validated in any other way. 
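 *
 * A caller is thus expected to assemble the argument along the lines
 * of the following sketch ("src" and "hdr" are illustrative only;
 * dtrace_dof_property(), used by the anonymous enabling code below,
 * builds its result in just this fashion):
 *
 *	if (hdr.dofh_loadsz < sizeof (dof_hdr_t))
 *		return (failure);
 *	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
 *	bcopy(src, dof, hdr.dofh_loadsz);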
10504 */ 10505 static int 10506 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 10507 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 10508 { 10509 uint64_t len = dof->dofh_loadsz, seclen; 10510 uintptr_t daddr = (uintptr_t)dof; 10511 dtrace_ecbdesc_t *ep; 10512 dtrace_enabling_t *enab; 10513 uint_t i; 10514 10515 ASSERT(MUTEX_HELD(&dtrace_lock)); 10516 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 10517 10518 /* 10519 * Check the DOF header identification bytes. In addition to checking 10520 * valid settings, we also verify that unused bits/bytes are zeroed so 10521 * we can use them later without fear of regressing existing binaries. 10522 */ 10523 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 10524 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 10525 dtrace_dof_error(dof, "DOF magic string mismatch"); 10526 return (-1); 10527 } 10528 10529 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 10530 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 10531 dtrace_dof_error(dof, "DOF has invalid data model"); 10532 return (-1); 10533 } 10534 10535 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 10536 dtrace_dof_error(dof, "DOF encoding mismatch"); 10537 return (-1); 10538 } 10539 10540 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 10541 dtrace_dof_error(dof, "DOF version mismatch"); 10542 return (-1); 10543 } 10544 10545 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 10546 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 10547 return (-1); 10548 } 10549 10550 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 10551 dtrace_dof_error(dof, "DOF uses too many integer registers"); 10552 return (-1); 10553 } 10554 10555 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 10556 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 10557 return (-1); 10558 } 10559 10560 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 10561 if (dof->dofh_ident[i] != 0) { 10562 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 10563 return (-1); 10564 } 10565 } 10566 10567 if (dof->dofh_flags & ~DOF_FL_VALID) { 10568 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 10569 return (-1); 10570 } 10571 10572 if (dof->dofh_secsize == 0) { 10573 dtrace_dof_error(dof, "zero section header size"); 10574 return (-1); 10575 } 10576 10577 /* 10578 * Check that the section headers don't exceed the amount of DOF 10579 * data. Note that we cast the section size and number of sections 10580 * to uint64_t's to prevent possible overflow in the multiplication. 10581 */ 10582 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 10583 10584 if (dof->dofh_secoff > len || seclen > len || 10585 dof->dofh_secoff + seclen > len) { 10586 dtrace_dof_error(dof, "truncated section headers"); 10587 return (-1); 10588 } 10589 10590 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 10591 dtrace_dof_error(dof, "misaligned section headers"); 10592 return (-1); 10593 } 10594 10595 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 10596 dtrace_dof_error(dof, "misaligned section size"); 10597 return (-1); 10598 } 10599 10600 /* 10601 * Take an initial pass through the section headers to be sure that 10602 * the headers don't have stray offsets. If the 'noprobes' flag is 10603 * set, do not permit sections relating to providers, probes, or args. 
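 *
 * (Of the dtrace_dof_slurp() callers in this file, only the anonymous
 * enabling path in dtrace_anon_property() sets 'noprobes': anonymous
 * DOF may not define its own providers or probes.  Helper DOF is
 * slurped with 'noprobes' clear, as helper providers legitimately
 * carry DOF_SECT_PROVIDER sections.)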
10604 */ 10605 for (i = 0; i < dof->dofh_secnum; i++) { 10606 dof_sec_t *sec = (dof_sec_t *)(daddr + 10607 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10608 10609 if (noprobes) { 10610 switch (sec->dofs_type) { 10611 case DOF_SECT_PROVIDER: 10612 case DOF_SECT_PROBES: 10613 case DOF_SECT_PRARGS: 10614 case DOF_SECT_PROFFS: 10615 dtrace_dof_error(dof, "illegal sections " 10616 "for enabling"); 10617 return (-1); 10618 } 10619 } 10620 10621 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10622 continue; /* just ignore non-loadable sections */ 10623 10624 if (sec->dofs_align & (sec->dofs_align - 1)) { 10625 dtrace_dof_error(dof, "bad section alignment"); 10626 return (-1); 10627 } 10628 10629 if (sec->dofs_offset & (sec->dofs_align - 1)) { 10630 dtrace_dof_error(dof, "misaligned section"); 10631 return (-1); 10632 } 10633 10634 if (sec->dofs_offset > len || sec->dofs_size > len || 10635 sec->dofs_offset + sec->dofs_size > len) { 10636 dtrace_dof_error(dof, "corrupt section header"); 10637 return (-1); 10638 } 10639 10640 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 10641 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 10642 dtrace_dof_error(dof, "non-terminating string table"); 10643 return (-1); 10644 } 10645 } 10646 10647 /* 10648 * Take a second pass through the sections and locate and perform any 10649 * relocations that are present. We do this after the first pass to 10650 * be sure that all sections have had their headers validated. 10651 */ 10652 for (i = 0; i < dof->dofh_secnum; i++) { 10653 dof_sec_t *sec = (dof_sec_t *)(daddr + 10654 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10655 10656 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10657 continue; /* skip sections that are not loadable */ 10658 10659 switch (sec->dofs_type) { 10660 case DOF_SECT_URELHDR: 10661 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 10662 return (-1); 10663 break; 10664 } 10665 } 10666 10667 if ((enab = *enabp) == NULL) 10668 enab = *enabp = dtrace_enabling_create(vstate); 10669 10670 for (i = 0; i < dof->dofh_secnum; i++) { 10671 dof_sec_t *sec = (dof_sec_t *)(daddr + 10672 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10673 10674 if (sec->dofs_type != DOF_SECT_ECBDESC) 10675 continue; 10676 10677 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 10678 dtrace_enabling_destroy(enab); 10679 *enabp = NULL; 10680 return (-1); 10681 } 10682 10683 dtrace_enabling_add(enab, ep); 10684 } 10685 10686 return (0); 10687 } 10688 10689 /* 10690 * Process DOF for any options. This routine assumes that the DOF has been 10691 * at least processed by dtrace_dof_slurp(). 
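 *
 * Each dof_optdesc_t in a DOF_SECT_OPTDESC section pairs an option
 * identifier (dofo_option) with a value (dofo_value).  For example, a
 * consumer specifying something like:
 *
 *	#pragma D option switchrate=10hz
 *
 * will present -- via libdtrace -- an option description whose value
 * denotes the corresponding interval; each such description is simply
 * handed to dtrace_state_option(), below.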
10692 */ 10693 static int 10694 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 10695 { 10696 int i, rval; 10697 uint32_t entsize; 10698 size_t offs; 10699 dof_optdesc_t *desc; 10700 10701 for (i = 0; i < dof->dofh_secnum; i++) { 10702 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 10703 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10704 10705 if (sec->dofs_type != DOF_SECT_OPTDESC) 10706 continue; 10707 10708 if (sec->dofs_align != sizeof (uint64_t)) { 10709 dtrace_dof_error(dof, "bad alignment in " 10710 "option description"); 10711 return (EINVAL); 10712 } 10713 10714 if ((entsize = sec->dofs_entsize) == 0) { 10715 dtrace_dof_error(dof, "zeroed option entry size"); 10716 return (EINVAL); 10717 } 10718 10719 if (entsize < sizeof (dof_optdesc_t)) { 10720 dtrace_dof_error(dof, "bad option entry size"); 10721 return (EINVAL); 10722 } 10723 10724 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 10725 desc = (dof_optdesc_t *)((uintptr_t)dof + 10726 (uintptr_t)sec->dofs_offset + offs); 10727 10728 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 10729 dtrace_dof_error(dof, "non-zero option string"); 10730 return (EINVAL); 10731 } 10732 10733 if (desc->dofo_value == DTRACEOPT_UNSET) { 10734 dtrace_dof_error(dof, "unset option"); 10735 return (EINVAL); 10736 } 10737 10738 if ((rval = dtrace_state_option(state, 10739 desc->dofo_option, desc->dofo_value)) != 0) { 10740 dtrace_dof_error(dof, "rejected option"); 10741 return (rval); 10742 } 10743 } 10744 } 10745 10746 return (0); 10747 } 10748 10749 /* 10750 * DTrace Consumer State Functions 10751 */ 10752 int 10753 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 10754 { 10755 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 10756 void *base; 10757 uintptr_t limit; 10758 dtrace_dynvar_t *dvar, *next, *start; 10759 int i; 10760 10761 ASSERT(MUTEX_HELD(&dtrace_lock)); 10762 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 10763 10764 bzero(dstate, sizeof (dtrace_dstate_t)); 10765 10766 if ((dstate->dtds_chunksize = chunksize) == 0) 10767 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 10768 10769 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 10770 size = min; 10771 10772 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10773 return (ENOMEM); 10774 10775 dstate->dtds_size = size; 10776 dstate->dtds_base = base; 10777 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 10778 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 10779 10780 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 10781 10782 if (hashsize != 1 && (hashsize & 1)) 10783 hashsize--; 10784 10785 dstate->dtds_hashsize = hashsize; 10786 dstate->dtds_hash = dstate->dtds_base; 10787 10788 /* 10789 * Determine number of active CPUs. Divide free list evenly among 10790 * active CPUs. 10791 */ 10792 start = (dtrace_dynvar_t *) 10793 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 10794 limit = (uintptr_t)base + size; 10795 10796 maxper = (limit - (uintptr_t)start) / NCPU; 10797 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 10798 10799 for (i = 0; i < NCPU; i++) { 10800 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 10801 10802 /* 10803 * If we don't even have enough chunks to make it once through 10804 * NCPUs, we're just going to allocate everything to the first 10805 * CPU. And if we're on the last CPU, we're going to allocate 10806 * whatever is left over. 
In either case, we set the limit to 10807 * be the limit of the dynamic variable space. 10808 */ 10809 if (maxper == 0 || i == NCPU - 1) { 10810 limit = (uintptr_t)base + size; 10811 start = NULL; 10812 } else { 10813 limit = (uintptr_t)start + maxper; 10814 start = (dtrace_dynvar_t *)limit; 10815 } 10816 10817 ASSERT(limit <= (uintptr_t)base + size); 10818 10819 for (;;) { 10820 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 10821 dstate->dtds_chunksize); 10822 10823 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 10824 break; 10825 10826 dvar->dtdv_next = next; 10827 dvar = next; 10828 } 10829 10830 if (maxper == 0) 10831 break; 10832 } 10833 10834 return (0); 10835 } 10836 10837 void 10838 dtrace_dstate_fini(dtrace_dstate_t *dstate) 10839 { 10840 ASSERT(MUTEX_HELD(&cpu_lock)); 10841 10842 if (dstate->dtds_base == NULL) 10843 return; 10844 10845 kmem_free(dstate->dtds_base, dstate->dtds_size); 10846 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 10847 } 10848 10849 static void 10850 dtrace_vstate_fini(dtrace_vstate_t *vstate) 10851 { 10852 /* 10853 * Logical XOR, where are you? 10854 */ 10855 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 10856 10857 if (vstate->dtvs_nglobals > 0) { 10858 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 10859 sizeof (dtrace_statvar_t *)); 10860 } 10861 10862 if (vstate->dtvs_ntlocals > 0) { 10863 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 10864 sizeof (dtrace_difv_t)); 10865 } 10866 10867 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 10868 10869 if (vstate->dtvs_nlocals > 0) { 10870 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 10871 sizeof (dtrace_statvar_t *)); 10872 } 10873 } 10874 10875 static void 10876 dtrace_state_clean(dtrace_state_t *state) 10877 { 10878 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 10879 return; 10880 10881 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 10882 dtrace_speculation_clean(state); 10883 } 10884 10885 static void 10886 dtrace_state_deadman(dtrace_state_t *state) 10887 { 10888 hrtime_t now; 10889 10890 dtrace_sync(); 10891 10892 now = dtrace_gethrtime(); 10893 10894 if (state != dtrace_anon.dta_state && 10895 now - state->dts_laststatus >= dtrace_deadman_user) 10896 return; 10897 10898 /* 10899 * We must be sure that dts_alive never appears to be less than the 10900 * value upon entry to dtrace_state_deadman(), and because we lack a 10901 * dtrace_cas64(), we cannot store to it atomically. We thus instead 10902 * store INT64_MAX to it, followed by a memory barrier, followed by 10903 * the new value. This assures that dts_alive never appears to be 10904 * less than its true value, regardless of the order in which the 10905 * stores to the underlying storage are issued. 
10906 */ 10907 state->dts_alive = INT64_MAX; 10908 dtrace_membar_producer(); 10909 state->dts_alive = now; 10910 } 10911 10912 dtrace_state_t * 10913 dtrace_state_create(dev_t *devp, cred_t *cr) 10914 { 10915 minor_t minor; 10916 major_t major; 10917 char c[30]; 10918 dtrace_state_t *state; 10919 dtrace_optval_t *opt; 10920 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 10921 10922 ASSERT(MUTEX_HELD(&dtrace_lock)); 10923 ASSERT(MUTEX_HELD(&cpu_lock)); 10924 10925 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 10926 VM_BESTFIT | VM_SLEEP); 10927 10928 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 10929 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 10930 return (NULL); 10931 } 10932 10933 state = ddi_get_soft_state(dtrace_softstate, minor); 10934 state->dts_epid = DTRACE_EPIDNONE + 1; 10935 10936 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 10937 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 10938 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 10939 10940 if (devp != NULL) { 10941 major = getemajor(*devp); 10942 } else { 10943 major = ddi_driver_major(dtrace_devi); 10944 } 10945 10946 state->dts_dev = makedevice(major, minor); 10947 10948 if (devp != NULL) 10949 *devp = state->dts_dev; 10950 10951 /* 10952 * We allocate NCPU buffers. On the one hand, this can be quite 10953 * a bit of memory per instance (nearly 36K on a Starcat). On the 10954 * other hand, it saves an additional memory reference in the probe 10955 * path. 10956 */ 10957 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 10958 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 10959 state->dts_cleaner = CYCLIC_NONE; 10960 state->dts_deadman = CYCLIC_NONE; 10961 state->dts_vstate.dtvs_state = state; 10962 10963 for (i = 0; i < DTRACEOPT_MAX; i++) 10964 state->dts_options[i] = DTRACEOPT_UNSET; 10965 10966 /* 10967 * Set the default options. 10968 */ 10969 opt = state->dts_options; 10970 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 10971 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 10972 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 10973 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 10974 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 10975 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 10976 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 10977 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 10978 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 10979 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 10980 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 10981 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 10982 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 10983 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 10984 10985 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 10986 10987 /* 10988 * Set up the credentials for this instantiation. 
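 *
 * In rough terms, the clauses below effect the following mapping: an
 * all-privileged consumer is given full visibility and the full
 * complement of actions; PRIV_DTRACE_PROC or PRIV_DTRACE_USER confers
 * process-level actions; PRIV_DTRACE_USER plus PRIV_PROC_OWNER
 * additionally widens visibility to all processes and permits
 * process-destructive actions; and PRIV_DTRACE_KERNEL confers kernel
 * visibility and actions as well.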
10989 */ 10990 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 10991 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 10992 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 10993 } else { 10994 state->dts_cred.dcr_uid = crgetuid(cr); 10995 state->dts_cred.dcr_gid = crgetgid(cr); 10996 10997 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 10998 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 10999 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11000 } 11001 11002 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) && 11003 PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11004 state->dts_cred.dcr_visible |= DTRACE_CRV_ALLPROC; 11005 state->dts_cred.dcr_action |= 11006 DTRACE_CRA_PROC_DESTRUCTIVE; 11007 } 11008 11009 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 11010 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 11011 DTRACE_CRV_ALLPROC; 11012 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 11013 DTRACE_CRA_PROC; 11014 11015 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11016 state->dts_cred.dcr_action |= 11017 DTRACE_CRA_PROC_DESTRUCTIVE; 11018 } 11019 } 11020 11021 return (state); 11022 } 11023 11024 static int 11025 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 11026 { 11027 dtrace_optval_t *opt = state->dts_options, size; 11028 processorid_t cpu; 11029 int flags = 0, rval; 11030 11031 ASSERT(MUTEX_HELD(&dtrace_lock)); 11032 ASSERT(MUTEX_HELD(&cpu_lock)); 11033 ASSERT(which < DTRACEOPT_MAX); 11034 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 11035 (state == dtrace_anon.dta_state && 11036 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 11037 11038 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 11039 return (0); 11040 11041 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 11042 cpu = opt[DTRACEOPT_CPU]; 11043 11044 if (which == DTRACEOPT_SPECSIZE) 11045 flags |= DTRACEBUF_NOSWITCH; 11046 11047 if (which == DTRACEOPT_BUFSIZE) { 11048 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 11049 flags |= DTRACEBUF_RING; 11050 11051 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 11052 flags |= DTRACEBUF_FILL; 11053 11054 flags |= DTRACEBUF_INACTIVE; 11055 } 11056 11057 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 11058 /* 11059 * The size must be 8-byte aligned. If the size is not 8-byte 11060 * aligned, drop it down by the difference. 11061 */ 11062 if (size & (sizeof (uint64_t) - 1)) 11063 size -= size & (sizeof (uint64_t) - 1); 11064 11065 if (size < state->dts_reserve) { 11066 /* 11067 * Buffers always must be large enough to accommodate 11068 * their prereserved space. We return E2BIG instead 11069 * of ENOMEM in this case to allow for user-level 11070 * software to differentiate the cases. 
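 *
 * For example, under the default "auto" buffer resizing policy, a
 * consumer that sets bufsize to 4m on a machine too memory-constrained
 * to accommodate 4m per CPU will induce successive attempts at 2m, 1m,
 * 512k and so on -- stopping at the first success, at the prereserved
 * space (E2BIG), or at sizeof (uint64_t) (ENOMEM).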
11071 */ 11072 return (E2BIG); 11073 } 11074 11075 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 11076 11077 if (rval != ENOMEM) { 11078 opt[which] = size; 11079 return (rval); 11080 } 11081 11082 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11083 return (rval); 11084 } 11085 11086 return (ENOMEM); 11087 } 11088 11089 static int 11090 dtrace_state_buffers(dtrace_state_t *state) 11091 { 11092 dtrace_speculation_t *spec = state->dts_speculations; 11093 int rval, i; 11094 11095 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 11096 DTRACEOPT_BUFSIZE)) != 0) 11097 return (rval); 11098 11099 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 11100 DTRACEOPT_AGGSIZE)) != 0) 11101 return (rval); 11102 11103 for (i = 0; i < state->dts_nspeculations; i++) { 11104 if ((rval = dtrace_state_buffer(state, 11105 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 11106 return (rval); 11107 } 11108 11109 return (0); 11110 } 11111 11112 static void 11113 dtrace_state_prereserve(dtrace_state_t *state) 11114 { 11115 dtrace_ecb_t *ecb; 11116 dtrace_probe_t *probe; 11117 11118 state->dts_reserve = 0; 11119 11120 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 11121 return; 11122 11123 /* 11124 * If our buffer policy is a "fill" buffer policy, we need to set the 11125 * prereserved space to be the space required by the END probes. 11126 */ 11127 probe = dtrace_probes[dtrace_probeid_end - 1]; 11128 ASSERT(probe != NULL); 11129 11130 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 11131 if (ecb->dte_state != state) 11132 continue; 11133 11134 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 11135 } 11136 } 11137 11138 static int 11139 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 11140 { 11141 dtrace_optval_t *opt = state->dts_options, sz, nspec; 11142 dtrace_speculation_t *spec; 11143 dtrace_buffer_t *buf; 11144 cyc_handler_t hdlr; 11145 cyc_time_t when; 11146 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11147 dtrace_icookie_t cookie; 11148 11149 mutex_enter(&cpu_lock); 11150 mutex_enter(&dtrace_lock); 11151 11152 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 11153 rval = EBUSY; 11154 goto out; 11155 } 11156 11157 /* 11158 * Before we can perform any checks, we must prime all of the 11159 * retained enablings that correspond to this state. 11160 */ 11161 dtrace_enabling_prime(state); 11162 11163 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 11164 rval = EACCES; 11165 goto out; 11166 } 11167 11168 dtrace_state_prereserve(state); 11169 11170 /* 11171 * What we want to do now is try to allocate our speculations. 11172 * We do not automatically resize the number of speculations; if 11173 * this fails, we will fail the operation.
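 *
 * (The number of speculations comes from the "nspec" option; a
 * consumer wanting four speculative buffers of 64k apiece might
 * specify something like:
 *
 *	#pragma D option nspec=4
 *	#pragma D option specsize=64k
 *
 * with the buffers themselves allocated during the DTRACEOPT_SPECSIZE
 * processing in dtrace_state_buffers().)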
11174 */ 11175 nspec = opt[DTRACEOPT_NSPEC]; 11176 ASSERT(nspec != DTRACEOPT_UNSET); 11177 11178 if (nspec > INT_MAX) { 11179 rval = ENOMEM; 11180 goto out; 11181 } 11182 11183 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 11184 11185 if (spec == NULL) { 11186 rval = ENOMEM; 11187 goto out; 11188 } 11189 11190 state->dts_speculations = spec; 11191 state->dts_nspeculations = (int)nspec; 11192 11193 for (i = 0; i < nspec; i++) { 11194 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 11195 rval = ENOMEM; 11196 goto err; 11197 } 11198 11199 spec[i].dtsp_buffer = buf; 11200 } 11201 11202 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 11203 if (dtrace_anon.dta_state == NULL) { 11204 rval = ENOENT; 11205 goto out; 11206 } 11207 11208 if (state->dts_necbs != 0) { 11209 rval = EALREADY; 11210 goto out; 11211 } 11212 11213 state->dts_anon = dtrace_anon_grab(); 11214 ASSERT(state->dts_anon != NULL); 11215 11216 *cpu = dtrace_anon.dta_beganon; 11217 11218 /* 11219 * If the anonymous state is active (as it almost certainly 11220 * is if the anonymous enabling ultimately matched anything), 11221 * we don't allow any further option processing -- but we 11222 * don't return failure. 11223 */ 11224 state = state->dts_anon; 11225 11226 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11227 goto out; 11228 } 11229 11230 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 11231 opt[DTRACEOPT_AGGSIZE] != 0) { 11232 if (state->dts_aggregations == NULL) { 11233 /* 11234 * We're not going to create an aggregation buffer 11235 * because we don't have any ECBs that contain 11236 * aggregations -- set this option to 0. 11237 */ 11238 opt[DTRACEOPT_AGGSIZE] = 0; 11239 } else { 11240 /* 11241 * If we have an aggregation buffer, we must also have 11242 * a buffer to use as scratch. 11243 */ 11244 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 11245 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 11246 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 11247 } 11248 } 11249 } 11250 11251 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 11252 opt[DTRACEOPT_SPECSIZE] != 0) { 11253 if (!state->dts_speculates) { 11254 /* 11255 * We're not going to create speculation buffers 11256 * because we don't have any ECBs that actually 11257 * speculate -- set the speculation size to 0. 11258 */ 11259 opt[DTRACEOPT_SPECSIZE] = 0; 11260 } 11261 } 11262 11263 /* 11264 * The bare minimum size for any buffer that we're actually going to 11265 * do anything to is sizeof (uint64_t). 11266 */ 11267 sz = sizeof (uint64_t); 11268 11269 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 11270 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 11271 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 11272 /* 11273 * A buffer size has been explicitly set to 0 (or to a size 11274 * that will be adjusted to 0) and we need the space -- we 11275 * need to return failure. We return ENOSPC to differentiate 11276 * it from failing to allocate a buffer due to failure to meet 11277 * the reserve (for which we return E2BIG). 
11278 */ 11279 rval = ENOSPC; 11280 goto out; 11281 } 11282 11283 if ((rval = dtrace_state_buffers(state)) != 0) 11284 goto err; 11285 11286 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 11287 sz = dtrace_dstate_defsize; 11288 11289 do { 11290 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 11291 11292 if (rval == 0) 11293 break; 11294 11295 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11296 goto err; 11297 } while (sz >>= 1); 11298 11299 opt[DTRACEOPT_DYNVARSIZE] = sz; 11300 11301 if (rval != 0) 11302 goto err; 11303 11304 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 11305 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 11306 11307 if (opt[DTRACEOPT_CLEANRATE] == 0) 11308 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11309 11310 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 11311 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 11312 11313 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 11314 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11315 11316 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 11317 hdlr.cyh_arg = state; 11318 hdlr.cyh_level = CY_LOW_LEVEL; 11319 11320 when.cyt_when = 0; 11321 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 11322 11323 state->dts_cleaner = cyclic_add(&hdlr, &when); 11324 11325 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 11326 hdlr.cyh_arg = state; 11327 hdlr.cyh_level = CY_LOW_LEVEL; 11328 11329 when.cyt_when = 0; 11330 when.cyt_interval = dtrace_deadman_interval; 11331 11332 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 11333 state->dts_deadman = cyclic_add(&hdlr, &when); 11334 11335 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 11336 11337 /* 11338 * Now it's time to actually fire the BEGIN probe. We need to disable 11339 * interrupts here both to record the CPU on which we fired the BEGIN 11340 * probe (the data from this CPU will be processed first at user 11341 * level) and to manually activate the buffer for this CPU. 11342 */ 11343 cookie = dtrace_interrupt_disable(); 11344 *cpu = CPU->cpu_id; 11345 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 11346 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11347 11348 dtrace_probe(dtrace_probeid_begin, 11349 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11350 dtrace_interrupt_enable(cookie); 11351 /* 11352 * We may have had an exit action from a BEGIN probe; only change our 11353 * state to ACTIVE if we're still in WARMUP. 11354 */ 11355 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 11356 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 11357 11358 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 11359 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 11360 11361 /* 11362 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 11363 * want each CPU to transition its principal buffer out of the 11364 * INACTIVE state. Doing this assures that no CPU will suddenly begin 11365 * processing an ECB halfway down a probe's ECB chain; all CPUs will 11366 * atomically transition from processing none of a state's ECBs to 11367 * processing all of them. 
11368 */ 11369 dtrace_xcall(DTRACE_CPUALL, 11370 (dtrace_xcall_t)dtrace_buffer_activate, state); 11371 goto out; 11372 11373 err: 11374 dtrace_buffer_free(state->dts_buffer); 11375 dtrace_buffer_free(state->dts_aggbuffer); 11376 11377 if ((nspec = state->dts_nspeculations) == 0) { 11378 ASSERT(state->dts_speculations == NULL); 11379 goto out; 11380 } 11381 11382 spec = state->dts_speculations; 11383 ASSERT(spec != NULL); 11384 11385 for (i = 0; i < state->dts_nspeculations; i++) { 11386 if ((buf = spec[i].dtsp_buffer) == NULL) 11387 break; 11388 11389 dtrace_buffer_free(buf); 11390 kmem_free(buf, bufsize); 11391 } 11392 11393 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11394 state->dts_nspeculations = 0; 11395 state->dts_speculations = NULL; 11396 11397 out: 11398 mutex_exit(&dtrace_lock); 11399 mutex_exit(&cpu_lock); 11400 11401 return (rval); 11402 } 11403 11404 static int 11405 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 11406 { 11407 dtrace_icookie_t cookie; 11408 11409 ASSERT(MUTEX_HELD(&dtrace_lock)); 11410 11411 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 11412 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 11413 return (EINVAL); 11414 11415 /* 11416 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 11417 * to be sure that every CPU has seen it. See below for the details 11418 * on why this is done. 11419 */ 11420 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 11421 dtrace_sync(); 11422 11423 /* 11424 * By this point, it is impossible for any CPU to be still processing 11425 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 11426 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 11427 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 11428 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 11429 * iff we're in the END probe. 11430 */ 11431 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 11432 dtrace_sync(); 11433 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 11434 11435 /* 11436 * Finally, we can release the reserve and call the END probe. We 11437 * disable interrupts across calling the END probe to allow us to 11438 * return the CPU on which we actually called the END probe. This 11439 * allows user-land to be sure that this CPU's principal buffer is 11440 * processed last. 
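 *
 * (This is the mirror image of the BEGIN handling in dtrace_state_go():
 * taken together, the two assure that a consumer may process the BEGIN
 * records first and the END records last, regardless of the CPUs on
 * which the two probes actually fired.)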
11441 */ 11442 state->dts_reserve = 0; 11443 11444 cookie = dtrace_interrupt_disable(); 11445 *cpu = CPU->cpu_id; 11446 dtrace_probe(dtrace_probeid_end, 11447 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11448 dtrace_interrupt_enable(cookie); 11449 11450 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 11451 dtrace_sync(); 11452 11453 return (0); 11454 } 11455 11456 static int 11457 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 11458 dtrace_optval_t val) 11459 { 11460 ASSERT(MUTEX_HELD(&dtrace_lock)); 11461 11462 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11463 return (EBUSY); 11464 11465 if (option >= DTRACEOPT_MAX) 11466 return (EINVAL); 11467 11468 if (option != DTRACEOPT_CPU && val < 0) 11469 return (EINVAL); 11470 11471 switch (option) { 11472 case DTRACEOPT_DESTRUCTIVE: 11473 if (dtrace_destructive_disallow) 11474 return (EACCES); 11475 11476 state->dts_cred.dcr_destructive = 1; 11477 break; 11478 11479 case DTRACEOPT_BUFSIZE: 11480 case DTRACEOPT_DYNVARSIZE: 11481 case DTRACEOPT_AGGSIZE: 11482 case DTRACEOPT_SPECSIZE: 11483 case DTRACEOPT_STRSIZE: 11484 if (val < 0) 11485 return (EINVAL); 11486 11487 if (val >= LONG_MAX) { 11488 /* 11489 * If this is an otherwise negative value, set it to 11490 * the highest multiple of 128m less than LONG_MAX. 11491 * Technically, we're adjusting the size without 11492 * regard to the buffer resizing policy, but in fact, 11493 * this has no effect -- if we set the buffer size to 11494 * ~LONG_MAX and the buffer policy is ultimately set to 11495 * be "manual", the buffer allocation is guaranteed to 11496 * fail, if only because the allocation requires two 11497 * buffers. (We set the size to the highest 11498 * multiple of 128m because it ensures that the size 11499 * will remain a multiple of a megabyte when 11500 * repeatedly halved -- all the way down to 15m.) 11501 */ 11502 val = LONG_MAX - (1 << 27) + 1; 11503 } 11504 } 11505 11506 state->dts_options[option] = val; 11507 11508 return (0); 11509 } 11510 11511 static void 11512 dtrace_state_destroy(dtrace_state_t *state) 11513 { 11514 dtrace_ecb_t *ecb; 11515 dtrace_vstate_t *vstate = &state->dts_vstate; 11516 minor_t minor = getminor(state->dts_dev); 11517 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11518 dtrace_speculation_t *spec = state->dts_speculations; 11519 int nspec = state->dts_nspeculations; 11520 uint32_t match; 11521 11522 ASSERT(MUTEX_HELD(&dtrace_lock)); 11523 ASSERT(MUTEX_HELD(&cpu_lock)); 11524 11525 /* 11526 * First, retract any retained enablings for this state. 11527 */ 11528 dtrace_enabling_retract(state); 11529 ASSERT(state->dts_nretained == 0); 11530 11531 /* 11532 * Now we need to disable and destroy any enabled probes. Because any 11533 * DTRACE_PRIV_KERNEL probes may actually be slowing our progress 11534 * (especially if they're all enabled), we take two passes through 11535 * the ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, 11536 * and in the second we disable whatever is left over.
11537 */ 11538 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 11539 for (i = 0; i < state->dts_necbs; i++) { 11540 if ((ecb = state->dts_ecbs[i]) == NULL) 11541 continue; 11542 11543 if (match && ecb->dte_probe != NULL) { 11544 dtrace_probe_t *probe = ecb->dte_probe; 11545 dtrace_provider_t *prov = probe->dtpr_provider; 11546 11547 if (!(prov->dtpv_priv.dtpp_flags & match)) 11548 continue; 11549 } 11550 11551 dtrace_ecb_disable(ecb); 11552 dtrace_ecb_destroy(ecb); 11553 } 11554 11555 if (!match) 11556 break; 11557 } 11558 11559 /* 11560 * Before we free the buffers, perform one more sync to assure that 11561 * every CPU is out of probe context. 11562 */ 11563 dtrace_sync(); 11564 11565 dtrace_buffer_free(state->dts_buffer); 11566 dtrace_buffer_free(state->dts_aggbuffer); 11567 11568 for (i = 0; i < nspec; i++) 11569 dtrace_buffer_free(spec[i].dtsp_buffer); 11570 11571 if (state->dts_cleaner != CYCLIC_NONE) 11572 cyclic_remove(state->dts_cleaner); 11573 11574 if (state->dts_deadman != CYCLIC_NONE) 11575 cyclic_remove(state->dts_deadman); 11576 11577 dtrace_dstate_fini(&vstate->dtvs_dynvars); 11578 dtrace_vstate_fini(vstate); 11579 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 11580 11581 if (state->dts_aggregations != NULL) { 11582 #ifdef DEBUG 11583 for (i = 0; i < state->dts_naggregations; i++) 11584 ASSERT(state->dts_aggregations[i] == NULL); 11585 #endif 11586 ASSERT(state->dts_naggregations > 0); 11587 kmem_free(state->dts_aggregations, 11588 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 11589 } 11590 11591 kmem_free(state->dts_buffer, bufsize); 11592 kmem_free(state->dts_aggbuffer, bufsize); 11593 11594 for (i = 0; i < nspec; i++) 11595 kmem_free(spec[i].dtsp_buffer, bufsize); 11596 11597 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11598 11599 dtrace_format_destroy(state); 11600 11601 vmem_destroy(state->dts_aggid_arena); 11602 ddi_soft_state_free(dtrace_softstate, minor); 11603 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11604 } 11605 11606 /* 11607 * DTrace Anonymous Enabling Functions 11608 */ 11609 static dtrace_state_t * 11610 dtrace_anon_grab(void) 11611 { 11612 dtrace_state_t *state; 11613 11614 ASSERT(MUTEX_HELD(&dtrace_lock)); 11615 11616 if ((state = dtrace_anon.dta_state) == NULL) { 11617 ASSERT(dtrace_anon.dta_enabling == NULL); 11618 return (NULL); 11619 } 11620 11621 ASSERT(dtrace_anon.dta_enabling != NULL); 11622 ASSERT(dtrace_retained != NULL); 11623 11624 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 11625 dtrace_anon.dta_enabling = NULL; 11626 dtrace_anon.dta_state = NULL; 11627 11628 return (state); 11629 } 11630 11631 static void 11632 dtrace_anon_property(void) 11633 { 11634 int i, rv; 11635 dtrace_state_t *state; 11636 dof_hdr_t *dof; 11637 char c[32]; /* enough for "dof-data-" + digits */ 11638 11639 ASSERT(MUTEX_HELD(&dtrace_lock)); 11640 ASSERT(MUTEX_HELD(&cpu_lock)); 11641 11642 for (i = 0; ; i++) { 11643 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 11644 11645 dtrace_err_verbose = 1; 11646 11647 if ((dof = dtrace_dof_property(c)) == NULL) { 11648 dtrace_err_verbose = 0; 11649 break; 11650 } 11651 11652 /* 11653 * We want to create anonymous state, so we need to transition 11654 * the kernel debugger to indicate that DTrace is active. If 11655 * this fails (e.g. because the debugger has modified text in 11656 * some way), we won't continue with the processing. 
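 *
 * (The kernel debugger and DTrace are mutually exclusive here because
 * both may need to patch kernel text; the KDI call below gives the
 * debugger an opportunity to veto our activation if, for example, it
 * has breakpoints planted in text that instrumentation might touch.)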
11657 */ 11658 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 11659 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 11660 "enabling ignored."); 11661 dtrace_dof_destroy(dof); 11662 break; 11663 } 11664 11665 /* 11666 * If we haven't allocated an anonymous state, we'll do so now. 11667 */ 11668 if ((state = dtrace_anon.dta_state) == NULL) { 11669 state = dtrace_state_create(NULL, NULL); 11670 dtrace_anon.dta_state = state; 11671 11672 if (state == NULL) { 11673 /* 11674 * This basically shouldn't happen: the only 11675 * failure mode from dtrace_state_create() is a 11676 * failure of ddi_soft_state_zalloc() that 11677 * itself should never happen. Still, the 11678 * interface allows for a failure mode, and 11679 * we want to fail as gracefully as possible: 11680 * we'll emit an error message and cease 11681 * processing anonymous state in this case. 11682 */ 11683 cmn_err(CE_WARN, "failed to create " 11684 "anonymous state"); 11685 dtrace_dof_destroy(dof); 11686 break; 11687 } 11688 } 11689 11690 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 11691 &dtrace_anon.dta_enabling, 0, B_TRUE); 11692 11693 if (rv == 0) 11694 rv = dtrace_dof_options(dof, state); 11695 11696 dtrace_err_verbose = 0; 11697 dtrace_dof_destroy(dof); 11698 11699 if (rv != 0) { 11700 /* 11701 * This is malformed DOF; chuck any anonymous state 11702 * that we created. 11703 */ 11704 ASSERT(dtrace_anon.dta_enabling == NULL); 11705 dtrace_state_destroy(state); 11706 dtrace_anon.dta_state = NULL; 11707 break; 11708 } 11709 11710 ASSERT(dtrace_anon.dta_enabling != NULL); 11711 } 11712 11713 if (dtrace_anon.dta_enabling != NULL) { 11714 int rval; 11715 11716 /* 11717 * dtrace_enabling_retain() can only fail because we are 11718 * trying to retain more enablings than are allowed -- but 11719 * we only have one anonymous enabling, and we are guaranteed 11720 * to be allowed at least one retained enabling; we assert 11721 * that dtrace_enabling_retain() returns success. 11722 */ 11723 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 11724 ASSERT(rval == 0); 11725 11726 dtrace_enabling_dump(dtrace_anon.dta_enabling); 11727 } 11728 } 11729 11730 /* 11731 * DTrace Helper Functions 11732 */ 11733 static void 11734 dtrace_helper_trace(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate, 11735 int where) 11736 { 11737 uint32_t size, next, nnext, i; 11738 dtrace_helptrace_t *ent; 11739 11740 if (!dtrace_helptrace_enabled) 11741 return; 11742 11743 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 11744 11745 /* 11746 * What would a tracing framework be without its own tracing 11747 * framework? (Well, a hell of a lot simpler, for starters...) 11748 */ 11749 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 11750 sizeof (uint64_t) - sizeof (uint64_t); 11751 11752 /* 11753 * Iterate until we can allocate a slot in the trace buffer. 11754 */ 11755 do { 11756 next = dtrace_helptrace_next; 11757 11758 if (next + size < dtrace_helptrace_bufsize) { 11759 nnext = next + size; 11760 } else { 11761 nnext = size; 11762 } 11763 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 11764 11765 /* 11766 * We have our slot; fill it in. 
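 *
 * (If nnext is exactly 'size', the compare-and-swap above wrapped the
 * buffer, and our slot actually begins at offset 0 -- hence the
 * adjustment of 'next' immediately below.)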
11767 */ 11768 if (nnext == size) 11769 next = 0; 11770 11771 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 11772 ent->dtht_helper = helper; 11773 ent->dtht_where = where; 11774 ent->dtht_nlocals = vstate->dtvs_nlocals; 11775 11776 for (i = 0; i < vstate->dtvs_nlocals; i++) { 11777 dtrace_statvar_t *svar; 11778 11779 if ((svar = vstate->dtvs_locals[i]) == NULL) 11780 continue; 11781 11782 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 11783 ent->dtht_locals[i] = 11784 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 11785 } 11786 } 11787 11788 static uint64_t 11789 dtrace_helper(int which, dtrace_mstate_t *mstate, 11790 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 11791 { 11792 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 11793 uint64_t sarg0 = mstate->dtms_arg[0]; 11794 uint64_t sarg1 = mstate->dtms_arg[1]; 11795 uint64_t rval; 11796 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 11797 dtrace_helper_action_t *helper; 11798 dtrace_vstate_t *vstate; 11799 dtrace_difo_t *pred; 11800 int i, trace = dtrace_helptrace_enabled; 11801 11802 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 11803 11804 if (helpers == NULL) 11805 return (0); 11806 11807 if ((helper = helpers->dthps_actions[which]) == NULL) 11808 return (0); 11809 11810 vstate = &helpers->dthps_vstate; 11811 mstate->dtms_arg[0] = arg0; 11812 mstate->dtms_arg[1] = arg1; 11813 11814 /* 11815 * Now iterate over each helper. If its predicate evaluates to 'true', 11816 * we'll call the corresponding actions. Note that the below calls 11817 * to dtrace_dif_emulate() may set faults in machine state. This is 11818 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 11819 * the stored DIF offset with its own (which is the desired behavior). 11820 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 11821 * from machine state; this is okay, too. 11822 */ 11823 for (; helper != NULL; helper = helper->dthp_next) { 11824 if ((pred = helper->dthp_predicate) != NULL) { 11825 if (trace) 11826 dtrace_helper_trace(helper, vstate, 0); 11827 11828 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 11829 goto next; 11830 11831 if (*flags & CPU_DTRACE_FAULT) 11832 goto err; 11833 } 11834 11835 for (i = 0; i < helper->dthp_nactions; i++) { 11836 if (trace) 11837 dtrace_helper_trace(helper, vstate, i + 1); 11838 11839 rval = dtrace_dif_emulate(helper->dthp_actions[i], 11840 mstate, vstate, state); 11841 11842 if (*flags & CPU_DTRACE_FAULT) 11843 goto err; 11844 } 11845 11846 next: 11847 if (trace) 11848 dtrace_helper_trace(helper, vstate, 11849 DTRACE_HELPTRACE_NEXT); 11850 } 11851 11852 if (trace) 11853 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_DONE); 11854 11855 /* 11856 * Restore the arg0 that we saved upon entry. 11857 */ 11858 mstate->dtms_arg[0] = sarg0; 11859 mstate->dtms_arg[1] = sarg1; 11860 11861 return (rval); 11862 11863 err: 11864 if (trace) 11865 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_ERR); 11866 11867 /* 11868 * Restore the arg0 that we saved upon entry. 
11869 */ 11870 mstate->dtms_arg[0] = sarg0; 11871 mstate->dtms_arg[1] = sarg1; 11872 11873 return (0); 11874 } 11875 11876 static void 11877 dtrace_helper_destroy(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate) 11878 { 11879 int i; 11880 11881 if (helper->dthp_predicate != NULL) 11882 dtrace_difo_release(helper->dthp_predicate, vstate); 11883 11884 for (i = 0; i < helper->dthp_nactions; i++) { 11885 ASSERT(helper->dthp_actions[i] != NULL); 11886 dtrace_difo_release(helper->dthp_actions[i], vstate); 11887 } 11888 11889 kmem_free(helper->dthp_actions, 11890 helper->dthp_nactions * sizeof (dtrace_difo_t *)); 11891 kmem_free(helper, sizeof (dtrace_helper_action_t)); 11892 } 11893 11894 static int 11895 dtrace_helper_destroygen(int gen) 11896 { 11897 dtrace_helpers_t *help = curproc->p_dtrace_helpers; 11898 dtrace_vstate_t *vstate; 11899 int i; 11900 11901 ASSERT(MUTEX_HELD(&dtrace_lock)); 11902 11903 if (help == NULL || gen > help->dthps_generation) 11904 return (EINVAL); 11905 11906 vstate = &help->dthps_vstate; 11907 11908 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 11909 dtrace_helper_action_t *last = NULL, *h, *next; 11910 11911 for (h = help->dthps_actions[i]; h != NULL; h = next) { 11912 next = h->dthp_next; 11913 11914 if (h->dthp_generation == gen) { 11915 if (last != NULL) { 11916 last->dthp_next = next; 11917 } else { 11918 help->dthps_actions[i] = next; 11919 } 11920 11921 dtrace_helper_destroy(h, vstate); 11922 } else { 11923 last = h; 11924 } 11925 } 11926 } 11927 11928 return (0); 11929 } 11930 11931 static int 11932 dtrace_helper_validate(dtrace_helper_action_t *helper) 11933 { 11934 int err = 0, i; 11935 dtrace_difo_t *dp; 11936 11937 if ((dp = helper->dthp_predicate) != NULL) 11938 err += dtrace_difo_validate_helper(dp); 11939 11940 for (i = 0; i < helper->dthp_nactions; i++) 11941 err += dtrace_difo_validate_helper(helper->dthp_actions[i]); 11942 11943 return (err == 0); 11944 } 11945 11946 static int 11947 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 11948 { 11949 dtrace_helpers_t *help; 11950 dtrace_helper_action_t *helper, *last; 11951 dtrace_actdesc_t *act; 11952 dtrace_vstate_t *vstate; 11953 dtrace_predicate_t *pred; 11954 int count = 0, nactions = 0, i; 11955 11956 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 11957 return (EINVAL); 11958 11959 help = curproc->p_dtrace_helpers; 11960 last = help->dthps_actions[which]; 11961 vstate = &help->dthps_vstate; 11962 11963 for (count = 0; last != NULL; last = last->dthp_next) { 11964 count++; 11965 if (last->dthp_next == NULL) 11966 break; 11967 } 11968 11969 /* 11970 * If we already have dtrace_helper_actions_max helper actions for this 11971 * helper action type, we'll refuse to add a new one.
11972 */ 11973 if (count >= dtrace_helper_actions_max) 11974 return (ENOSPC); 11975 11976 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 11977 helper->dthp_generation = help->dthps_generation; 11978 11979 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 11980 ASSERT(pred->dtp_difo != NULL); 11981 dtrace_difo_hold(pred->dtp_difo); 11982 helper->dthp_predicate = pred->dtp_difo; 11983 } 11984 11985 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 11986 if (act->dtad_kind != DTRACEACT_DIFEXPR) 11987 goto err; 11988 11989 if (act->dtad_difo == NULL) 11990 goto err; 11991 11992 nactions++; 11993 } 11994 11995 helper->dthp_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 11996 (helper->dthp_nactions = nactions), KM_SLEEP); 11997 11998 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 11999 dtrace_difo_hold(act->dtad_difo); 12000 helper->dthp_actions[i++] = act->dtad_difo; 12001 } 12002 12003 if (!dtrace_helper_validate(helper)) 12004 goto err; 12005 12006 if (last == NULL) { 12007 help->dthps_actions[which] = helper; 12008 } else { 12009 last->dthp_next = helper; 12010 } 12011 12012 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 12013 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 12014 dtrace_helptrace_next = 0; 12015 } 12016 12017 return (0); 12018 err: 12019 dtrace_helper_destroy(helper, vstate); 12020 return (EINVAL); 12021 } 12022 12023 static void 12024 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 12025 dof_helper_t *dofhp) 12026 { 12027 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 12028 12029 mutex_enter(&dtrace_meta_lock); 12030 mutex_enter(&dtrace_lock); 12031 12032 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 12033 /* 12034 * If the dtrace module is loaded but not attached, or if 12035 * there isn't a meta provider registered to deal with 12036 * these provider descriptions, we need to postpone creating 12037 * the actual providers until later. 12038 */ 12039 12040 if (help->dthps_next == NULL && help->dthps_prev == NULL && 12041 dtrace_deferred_pid != help) { 12042 help->dthps_pid = p->p_pid; 12043 help->dthps_next = dtrace_deferred_pid; 12044 help->dthps_prev = NULL; 12045 if (dtrace_deferred_pid != NULL) 12046 dtrace_deferred_pid->dthps_prev = help; 12047 dtrace_deferred_pid = help; 12048 } 12049 12050 mutex_exit(&dtrace_lock); 12051 12052 } else if (dofhp != NULL) { 12053 /* 12054 * If the dtrace module is loaded and we have a particular 12055 * helper provider description, pass that off to the 12056 * meta provider. 12057 */ 12058 12059 mutex_exit(&dtrace_lock); 12060 12061 dtrace_helper_provide(dofhp, p->p_pid); 12062 12063 } else { 12064 /* 12065 * Otherwise, just pass all the helper provider descriptions 12066 * off to the meta provider. 12067 */ 12068 12069 int i; 12070 mutex_exit(&dtrace_lock); 12071 12072 for (i = 0; i < help->dthps_nprovs; i++) { 12073 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 12074 p->p_pid); 12075 } 12076 } 12077 12078 mutex_exit(&dtrace_meta_lock); 12079 } 12080 12081 static int 12082 dtrace_helper_provider_add(dof_helper_t *dofhp) 12083 { 12084 dtrace_helpers_t *help; 12085 dtrace_helper_provider_t *hprov, **tmp_provs; 12086 uint_t tmp_nprovs, i; 12087 12088 help = curproc->p_dtrace_helpers; 12089 ASSERT(help != NULL); 12090 12091 /* 12092 * If we already have dtrace_helper_providers_max helper providers, 12093 * we refuse to add a new one.
12094 */ 12095 if (help->dthps_nprovs >= dtrace_helper_providers_max) 12096 return (ENOSPC); 12097 12098 /* 12099 * Check to make sure this isn't a duplicate. 12100 */ 12101 for (i = 0; i < help->dthps_nprovs; i++) { 12102 if (dofhp->dofhp_addr == 12103 help->dthps_provs[i]->dthp_prov.dofhp_addr) 12104 return (EALREADY); 12105 } 12106 12107 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 12108 hprov->dthp_prov = *dofhp; 12109 hprov->dthp_ref = 1; 12110 12111 tmp_nprovs = help->dthps_nprovs; 12112 tmp_provs = help->dthps_provs; 12113 help->dthps_nprovs++; 12114 help->dthps_provs = kmem_zalloc(help->dthps_nprovs * 12115 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12116 12117 help->dthps_provs[tmp_nprovs] = hprov; 12118 if (tmp_provs != NULL) { 12119 bcopy(tmp_provs, help->dthps_provs, tmp_nprovs * 12120 sizeof (dtrace_helper_provider_t *)); 12121 kmem_free(tmp_provs, tmp_nprovs * 12122 sizeof (dtrace_helper_provider_t *)); 12123 } 12124 12125 return (0); 12126 } 12127 12128 static void 12129 dtrace_helper_provider_remove(dtrace_helper_provider_t *hprov) 12130 { 12131 mutex_enter(&dtrace_lock); 12132 12133 if (--hprov->dthp_ref == 0) { 12134 dof_hdr_t *dof; 12135 mutex_exit(&dtrace_lock); 12136 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 12137 dtrace_dof_destroy(dof); 12138 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 12139 } else { 12140 mutex_exit(&dtrace_lock); 12141 } 12142 } 12143 12144 static int 12145 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 12146 { 12147 uintptr_t daddr = (uintptr_t)dof; 12148 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 12149 dof_provider_t *provider; 12150 dof_probe_t *probe; 12151 uint8_t *arg; 12152 char *strtab, *typestr; 12153 dof_stridx_t typeidx; 12154 size_t typesz; 12155 uint_t nprobes, j, k; 12156 12157 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 12158 12159 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 12160 dtrace_dof_error(dof, "misaligned section offset"); 12161 return (-1); 12162 } 12163 12164 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 12165 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 12166 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 12167 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 12168 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 12169 12170 if (str_sec == NULL || prb_sec == NULL || 12171 arg_sec == NULL || off_sec == NULL) 12172 return (-1); 12173 12174 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 12175 12176 if (provider->dofpv_name >= str_sec->dofs_size || 12177 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 12178 dtrace_dof_error(dof, "invalid provider name"); 12179 return (-1); 12180 } 12181 12182 if (prb_sec->dofs_entsize == 0 || 12183 prb_sec->dofs_entsize > prb_sec->dofs_size) { 12184 dtrace_dof_error(dof, "invalid entry size"); 12185 return (-1); 12186 } 12187 12188 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 12189 dtrace_dof_error(dof, "misaligned entry size"); 12190 return (-1); 12191 } 12192 12193 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 12194 dtrace_dof_error(dof, "invalid entry size"); 12195 return (-1); 12196 } 12197 12198 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 12199 dtrace_dof_error(dof, "misaligned section offset"); 12200 return (-1); 12201 } 12202 12203 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 12204 dtrace_dof_error(dof, "invalid entry size"); 
12205 return (-1); 12206 } 12207 12208 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 12209 12210 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 12211 12212 /* 12213 * Take a pass through the probes to check for errors. 12214 */ 12215 for (j = 0; j < nprobes; j++) { 12216 probe = (dof_probe_t *)(uintptr_t)(daddr + 12217 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 12218 12219 if (probe->dofpr_func >= str_sec->dofs_size) { 12220 dtrace_dof_error(dof, "invalid function name"); 12221 return (-1); 12222 } 12223 12224 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 12225 dtrace_dof_error(dof, "function name too long"); 12226 return (-1); 12227 } 12228 12229 if (probe->dofpr_name >= str_sec->dofs_size || 12230 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 12231 dtrace_dof_error(dof, "invalid probe name"); 12232 return (-1); 12233 } 12234 12235 12236 if (probe->dofpr_offidx + probe->dofpr_noffs < 12237 probe->dofpr_offidx || 12238 (probe->dofpr_offidx + probe->dofpr_noffs) * 12239 off_sec->dofs_entsize > off_sec->dofs_size) { 12240 dtrace_dof_error(dof, "invalid probe offset"); 12241 return (-1); 12242 } 12243 12244 if (probe->dofpr_argidx + probe->dofpr_xargc < 12245 probe->dofpr_argidx || 12246 (probe->dofpr_argidx + probe->dofpr_xargc) * 12247 arg_sec->dofs_entsize > arg_sec->dofs_size) { 12248 dtrace_dof_error(dof, "invalid args"); 12249 return (-1); 12250 } 12251 12252 typeidx = probe->dofpr_nargv; 12253 typestr = strtab + probe->dofpr_nargv; 12254 for (k = 0; k < probe->dofpr_nargc; k++) { 12255 if (typeidx >= str_sec->dofs_size) { 12256 dtrace_dof_error(dof, "bad " 12257 "native argument type"); 12258 return (-1); 12259 } 12260 12261 typesz = strlen(typestr) + 1; 12262 if (typesz > DTRACE_ARGTYPELEN) { 12263 dtrace_dof_error(dof, "native " 12264 "argument type too long"); 12265 return (-1); 12266 } 12267 typeidx += typesz; 12268 typestr += typesz; 12269 } 12270 12271 typeidx = probe->dofpr_xargv; 12272 typestr = strtab + probe->dofpr_xargv; 12273 for (k = 0; k < probe->dofpr_xargc; k++) { 12274 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 12275 dtrace_dof_error(dof, "bad " 12276 "native argument index"); 12277 return (-1); 12278 } 12279 12280 if (typeidx >= str_sec->dofs_size) { 12281 dtrace_dof_error(dof, "bad " 12282 "translated argument type"); 12283 return (-1); 12284 } 12285 12286 typesz = strlen(typestr) + 1; 12287 if (typesz > DTRACE_ARGTYPELEN) { 12288 dtrace_dof_error(dof, "translated argument " 12289 "type too long"); 12290 return (-1); 12291 } 12292 12293 typeidx += typesz; 12294 typestr += typesz; 12295 } 12296 } 12297 12298 return (0); 12299 } 12300 12301 static int 12302 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 12303 { 12304 dtrace_helpers_t *help; 12305 dtrace_vstate_t *vstate; 12306 dtrace_enabling_t *enab = NULL; 12307 int i, gen, rv, nhelpers = 0, destroy = 1; 12308 12309 ASSERT(MUTEX_HELD(&dtrace_lock)); 12310 12311 if ((help = curproc->p_dtrace_helpers) == NULL) 12312 help = dtrace_helpers_create(curproc); 12313 12314 vstate = &help->dthps_vstate; 12315 12316 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 12317 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 12318 dtrace_dof_destroy(dof); 12319 return (rv); 12320 } 12321 12322 /* 12323 * Now we need to walk through the ECB descriptions in the enabling. 
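 *
 * (Only descriptions naming the probe dtrace:helper:ustack are
 * accepted as helper actions. In D, such a helper is declared along
 * these lines -- an illustrative fragment, not lifted from any
 * shipped script:
 *
 *	dtrace:helper:ustack:
 *	{
 *		this->annotation = "frame-annotation";
 *	}
 *
 * Every action in the clause must compile to a DIF expression;
 * dtrace_helper_action_add() rejects anything else.)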
12324 */ 12325 for (i = 0; i < enab->dten_ndesc; i++) { 12326 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12327 dtrace_probedesc_t *desc = &ep->dted_probe; 12328 12329 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 12330 continue; 12331 12332 if (strcmp(desc->dtpd_mod, "helper") != 0) 12333 continue; 12334 12335 if (strcmp(desc->dtpd_func, "ustack") != 0) 12336 continue; 12337 12338 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 12339 ep)) != 0) { 12340 /* 12341 * Adding this helper action failed -- we are now going 12342 * to rip out the entire generation and return failure. 12343 */ 12344 (void) dtrace_helper_destroygen(help->dthps_generation); 12345 dtrace_enabling_destroy(enab); 12346 dtrace_dof_destroy(dof); 12347 dtrace_err = rv; 12348 return (-1); 12349 } 12350 12351 nhelpers++; 12352 } 12353 12354 if (nhelpers < enab->dten_ndesc) 12355 dtrace_dof_error(dof, "unmatched helpers"); 12356 12357 if (dhp != NULL) { 12358 uintptr_t daddr = (uintptr_t)dof; 12359 int err = 0; 12360 12361 /* 12362 * Look for helper probes. 12363 */ 12364 for (i = 0; i < dof->dofh_secnum; i++) { 12365 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 12366 dof->dofh_secoff + i * dof->dofh_secsize); 12367 12368 if (sec->dofs_type != DOF_SECT_PROVIDER) 12369 continue; 12370 12371 if (dtrace_helper_provider_validate(dof, sec) != 0) { 12372 err = 1; 12373 break; 12374 } 12375 } 12376 12377 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 12378 if (err == 0 && dtrace_helper_provider_add(dhp) == 0) 12379 destroy = 0; 12380 else 12381 dhp = NULL; 12382 } 12383 12384 gen = help->dthps_generation++; 12385 dtrace_enabling_destroy(enab); 12386 12387 if (dhp != NULL) { 12388 mutex_exit(&dtrace_lock); 12389 dtrace_helper_provider_register(curproc, help, dhp); 12390 mutex_enter(&dtrace_lock); 12391 } 12392 12393 if (destroy) 12394 dtrace_dof_destroy(dof); 12395 12396 return (gen); 12397 } 12398 12399 static dtrace_helpers_t * 12400 dtrace_helpers_create(proc_t *p) 12401 { 12402 dtrace_helpers_t *help; 12403 12404 ASSERT(MUTEX_HELD(&dtrace_lock)); 12405 ASSERT(p->p_dtrace_helpers == NULL); 12406 12407 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 12408 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 12409 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 12410 12411 p->p_dtrace_helpers = help; 12412 dtrace_helpers++; 12413 12414 return (help); 12415 } 12416 12417 static void 12418 dtrace_helpers_destroy(void) 12419 { 12420 dtrace_helpers_t *help; 12421 dtrace_vstate_t *vstate; 12422 proc_t *p = curproc; 12423 int i; 12424 12425 mutex_enter(&dtrace_lock); 12426 12427 ASSERT(p->p_dtrace_helpers != NULL); 12428 ASSERT(dtrace_helpers > 0); 12429 12430 help = p->p_dtrace_helpers; 12431 vstate = &help->dthps_vstate; 12432 12433 /* 12434 * We're now going to lose the help from this process. 12435 */ 12436 p->p_dtrace_helpers = NULL; 12437 dtrace_sync(); 12438 12439 /* 12440 * Destroy the helper actions. 12441 */ 12442 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12443 dtrace_helper_action_t *h, *next; 12444 12445 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12446 next = h->dthp_next; 12447 dtrace_helper_destroy(h, vstate); 12448 h = next; 12449 } 12450 } 12451 12452 mutex_exit(&dtrace_lock); 12453 12454 /* 12455 * Destroy the helper providers.
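 *
 * (If a meta provider is registered, each helper provider is removed
 * from it; otherwise this process must still be on the deferred list,
 * and is simply unlinked from that list. Either way, the per-provider
 * reference counts are then dropped, and any provider whose count
 * reaches zero has its DOF destroyed.)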
12456 */ 12457 if (help->dthps_nprovs > 0) { 12458 mutex_enter(&dtrace_meta_lock); 12459 if (dtrace_meta_pid != NULL) { 12460 ASSERT(dtrace_deferred_pid == NULL); 12461 12462 for (i = 0; i < help->dthps_nprovs; i++) { 12463 dtrace_helper_remove( 12464 &help->dthps_provs[i]->dthp_prov, p->p_pid); 12465 } 12466 } else { 12467 mutex_enter(&dtrace_lock); 12468 ASSERT(dtrace_deferred_pid != NULL); 12469 12470 /* 12471 * Remove the helper from the deferred list. 12472 */ 12473 if (help->dthps_next != NULL) 12474 help->dthps_next->dthps_prev = help->dthps_prev; 12475 if (help->dthps_prev != NULL) 12476 help->dthps_prev->dthps_next = help->dthps_next; 12477 if (dtrace_deferred_pid == help) { 12478 dtrace_deferred_pid = help->dthps_next; 12479 ASSERT(help->dthps_prev == NULL); 12480 } 12481 12482 mutex_exit(&dtrace_lock); 12483 } 12484 12485 mutex_exit(&dtrace_meta_lock); 12486 12487 for (i = 0; i < help->dthps_nprovs; i++) { 12488 dtrace_helper_provider_remove(help->dthps_provs[i]); 12489 } 12490 12491 kmem_free(help->dthps_provs, help->dthps_nprovs * 12492 sizeof (dtrace_helper_provider_t *)); 12493 } 12494 12495 mutex_enter(&dtrace_lock); 12496 12497 dtrace_vstate_fini(&help->dthps_vstate); 12498 kmem_free(help->dthps_actions, 12499 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 12500 kmem_free(help, sizeof (dtrace_helpers_t)); 12501 12502 --dtrace_helpers; 12503 mutex_exit(&dtrace_lock); 12504 } 12505 12506 static void 12507 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 12508 { 12509 dtrace_helpers_t *help, *newhelp; 12510 dtrace_helper_action_t *helper, *new, *last; 12511 dtrace_difo_t *dp; 12512 dtrace_vstate_t *vstate; 12513 int i, j, sz, hasprovs = 0; 12514 12515 mutex_enter(&dtrace_lock); 12516 ASSERT(from->p_dtrace_helpers != NULL); 12517 ASSERT(dtrace_helpers > 0); 12518 12519 help = from->p_dtrace_helpers; 12520 newhelp = dtrace_helpers_create(to); 12521 ASSERT(to->p_dtrace_helpers != NULL); 12522 12523 newhelp->dthps_generation = help->dthps_generation; 12524 vstate = &newhelp->dthps_vstate; 12525 12526 /* 12527 * Duplicate the helper actions. 12528 */ 12529 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12530 if ((helper = help->dthps_actions[i]) == NULL) 12531 continue; 12532 12533 for (last = NULL; helper != NULL; helper = helper->dthp_next) { 12534 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 12535 KM_SLEEP); 12536 new->dthp_generation = helper->dthp_generation; 12537 12538 if ((dp = helper->dthp_predicate) != NULL) { 12539 dp = dtrace_difo_duplicate(dp, vstate); 12540 new->dthp_predicate = dp; 12541 } 12542 12543 new->dthp_nactions = helper->dthp_nactions; 12544 sz = sizeof (dtrace_difo_t *) * new->dthp_nactions; 12545 new->dthp_actions = kmem_alloc(sz, KM_SLEEP); 12546 12547 for (j = 0; j < new->dthp_nactions; j++) { 12548 dtrace_difo_t *dp = helper->dthp_actions[j]; 12549 12550 ASSERT(dp != NULL); 12551 dp = dtrace_difo_duplicate(dp, vstate); 12552 new->dthp_actions[j] = dp; 12553 } 12554 12555 if (last != NULL) { 12556 last->dthp_next = new; 12557 } else { 12558 newhelp->dthps_actions[i] = new; 12559 } 12560 12561 last = new; 12562 } 12563 } 12564 12565 /* 12566 * Duplicate the helper providers and register them with the 12567 * DTrace framework. 
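 *
 * (The provider descriptions themselves are shared with the parent
 * rather than deep-copied: only dthp_ref is bumped below. The
 * underlying DOF is therefore destroyed exactly once, when the last
 * process referencing it goes away.)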
12568 */ 12569 if (help->dthps_nprovs > 0) { 12570 newhelp->dthps_nprovs = help->dthps_nprovs; 12571 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 12572 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12573 for (i = 0; i < newhelp->dthps_nprovs; i++) { 12574 newhelp->dthps_provs[i] = help->dthps_provs[i]; 12575 newhelp->dthps_provs[i]->dthp_ref++; 12576 } 12577 12578 hasprovs = 1; 12579 } 12580 12581 mutex_exit(&dtrace_lock); 12582 12583 if (hasprovs) 12584 dtrace_helper_provider_register(to, newhelp, NULL); 12585 } 12586 12587 /* 12588 * DTrace Hook Functions 12589 */ 12590 static void 12591 dtrace_module_loaded(struct modctl *ctl) 12592 { 12593 dtrace_provider_t *prv; 12594 12595 mutex_enter(&dtrace_provider_lock); 12596 mutex_enter(&mod_lock); 12597 12598 ASSERT(ctl->mod_busy); 12599 12600 /* 12601 * We're going to call each provider's per-module provide operation 12602 * specifying only this module. 12603 */ 12604 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 12605 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 12606 12607 mutex_exit(&mod_lock); 12608 mutex_exit(&dtrace_provider_lock); 12609 12610 /* 12611 * If we have any retained enablings, we need to match against them. 12612 * Enabling probes requires that cpu_lock be held, and we cannot hold 12613 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 12614 * module. (In particular, this happens when loading scheduling 12615 * classes.) So if we have any retained enablings, we need to dispatch 12616 * our task queue to do the match for us. 12617 */ 12618 mutex_enter(&dtrace_lock); 12619 12620 if (dtrace_retained == NULL) { 12621 mutex_exit(&dtrace_lock); 12622 return; 12623 } 12624 12625 (void) taskq_dispatch(dtrace_taskq, 12626 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 12627 12628 mutex_exit(&dtrace_lock); 12629 12630 /* 12631 * And now, for a little heuristic sleaze: in general, we want to 12632 * match modules as soon as they load. However, we cannot guarantee 12633 * this, because it would lead us to the lock ordering violation 12634 * outlined above. The common case, of course, is that cpu_lock is 12635 * _not_ held -- so we delay here for a clock tick, hoping that that's 12636 * long enough for the task queue to do its work. If it's not, it's 12637 * not a serious problem -- it just means that the module that we 12638 * just loaded may not be immediately instrumentable. 12639 */ 12640 delay(1); 12641 } 12642 12643 static void 12644 dtrace_module_unloaded(struct modctl *ctl) 12645 { 12646 dtrace_probe_t template, *probe, *first, *next; 12647 dtrace_provider_t *prov; 12648 12649 template.dtpr_mod = ctl->mod_modname; 12650 12651 mutex_enter(&dtrace_provider_lock); 12652 mutex_enter(&mod_lock); 12653 mutex_enter(&dtrace_lock); 12654 12655 if (dtrace_bymod == NULL) { 12656 /* 12657 * The DTrace module is loaded (obviously) but not attached; 12658 * we don't have any work to do. 12659 */ 12660 mutex_exit(&dtrace_provider_lock); 12661 mutex_exit(&mod_lock); 12662 mutex_exit(&dtrace_lock); 12663 return; 12664 } 12665 12666 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 12667 probe != NULL; probe = probe->dtpr_nextmod) { 12668 if (probe->dtpr_ecb != NULL) { 12669 mutex_exit(&dtrace_provider_lock); 12670 mutex_exit(&mod_lock); 12671 mutex_exit(&dtrace_lock); 12672 12673 /* 12674 * This shouldn't _actually_ be possible -- we're 12675 * unloading a module that has an enabled probe in it.
12676 * (It's normally up to the provider to make sure that 12677 * this can't happen.) However, because dtps_enable() 12678 * doesn't have a failure mode, there can be an 12679 * enable/unload race. Upshot: we don't want to 12680 * assert, but we're not going to disable the 12681 * probe, either. 12682 */ 12683 if (dtrace_err_verbose) { 12684 cmn_err(CE_WARN, "unloaded module '%s' had " 12685 "enabled probes", ctl->mod_modname); 12686 } 12687 12688 return; 12689 } 12690 } 12691 12692 probe = first; 12693 12694 for (first = NULL; probe != NULL; probe = next) { 12695 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 12696 12697 dtrace_probes[probe->dtpr_id - 1] = NULL; 12698 12699 next = probe->dtpr_nextmod; 12700 dtrace_hash_remove(dtrace_bymod, probe); 12701 dtrace_hash_remove(dtrace_byfunc, probe); 12702 dtrace_hash_remove(dtrace_byname, probe); 12703 12704 if (first == NULL) { 12705 first = probe; 12706 probe->dtpr_nextmod = NULL; 12707 } else { 12708 probe->dtpr_nextmod = first; 12709 first = probe; 12710 } 12711 } 12712 12713 /* 12714 * We've removed all of the module's probes from the hash chains and 12715 * from the probe array. Now issue a dtrace_sync() to be sure that 12716 * everyone has cleared out from any probe array processing. 12717 */ 12718 dtrace_sync(); 12719 12720 for (probe = first; probe != NULL; probe = first) { 12721 first = probe->dtpr_nextmod; 12722 prov = probe->dtpr_provider; 12723 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 12724 probe->dtpr_arg); 12725 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 12726 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 12727 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 12728 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 12729 kmem_free(probe, sizeof (dtrace_probe_t)); 12730 } 12731 12732 mutex_exit(&dtrace_lock); 12733 mutex_exit(&mod_lock); 12734 mutex_exit(&dtrace_provider_lock); 12735 } 12736 12737 void 12738 dtrace_suspend(void) 12739 { 12740 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 12741 } 12742 12743 void 12744 dtrace_resume(void) 12745 { 12746 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 12747 } 12748 12749 static int 12750 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 12751 { 12752 ASSERT(MUTEX_HELD(&cpu_lock)); 12753 mutex_enter(&dtrace_lock); 12754 12755 switch (what) { 12756 case CPU_CONFIG: { 12757 dtrace_state_t *state; 12758 dtrace_optval_t *opt, rs, c; 12759 12760 /* 12761 * For now, we only allocate a new buffer for anonymous state. 12762 */ 12763 if ((state = dtrace_anon.dta_state) == NULL) 12764 break; 12765 12766 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12767 break; 12768 12769 opt = state->dts_options; 12770 c = opt[DTRACEOPT_CPU]; 12771 12772 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 12773 break; 12774 12775 /* 12776 * Regardless of what the actual policy is, we're going to 12777 * temporarily set our resize policy to be manual. We're 12778 * also going to temporarily set our CPU option to denote 12779 * the newly configured CPU. 12780 */ 12781 rs = opt[DTRACEOPT_BUFRESIZE]; 12782 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 12783 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 12784 12785 (void) dtrace_state_buffers(state); 12786 12787 opt[DTRACEOPT_BUFRESIZE] = rs; 12788 opt[DTRACEOPT_CPU] = c; 12789 12790 break; 12791 } 12792 12793 case CPU_UNCONFIG: 12794 /* 12795 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 12796 * buffer will be freed when the consumer exits.) 12797 */ 12798 break; 12799 12800 default: 12801 break; 12802 } 12803 12804 mutex_exit(&dtrace_lock); 12805 return (0); 12806 } 12807 12808 static void 12809 dtrace_cpu_setup_initial(processorid_t cpu) 12810 { 12811 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 12812 } 12813 12814 static void 12815 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 12816 { 12817 if (dtrace_toxranges >= dtrace_toxranges_max) { 12818 int osize, nsize; 12819 dtrace_toxrange_t *range; 12820 12821 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 12822 12823 if (osize == 0) { 12824 ASSERT(dtrace_toxrange == NULL); 12825 ASSERT(dtrace_toxranges_max == 0); 12826 dtrace_toxranges_max = 1; 12827 } else { 12828 dtrace_toxranges_max <<= 1; 12829 } 12830 12831 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 12832 range = kmem_zalloc(nsize, KM_SLEEP); 12833 12834 if (dtrace_toxrange != NULL) { 12835 ASSERT(osize != 0); 12836 bcopy(dtrace_toxrange, range, osize); 12837 kmem_free(dtrace_toxrange, osize); 12838 } 12839 12840 dtrace_toxrange = range; 12841 } 12842 12843 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 12844 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 12845 12846 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 12847 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 12848 dtrace_toxranges++; 12849 } 12850 12851 /* 12852 * DTrace Driver Cookbook Functions 12853 */ 12854 /*ARGSUSED*/ 12855 static int 12856 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 12857 { 12858 dtrace_provider_id_t id; 12859 dtrace_state_t *state = NULL; 12860 dtrace_enabling_t *enab; 12861 12862 mutex_enter(&cpu_lock); 12863 mutex_enter(&dtrace_provider_lock); 12864 mutex_enter(&dtrace_lock); 12865 12866 if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t) + 12867 NCPU * sizeof (dtrace_buffer_t), 0) != 0) { 12868 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 12869 mutex_exit(&cpu_lock); 12870 mutex_exit(&dtrace_provider_lock); 12871 mutex_exit(&dtrace_lock); 12872 return (DDI_FAILURE); 12873 } 12874 12875 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 12876 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 12877 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 12878 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 12879 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 12880 ddi_remove_minor_node(devi, NULL); 12881 ddi_soft_state_fini(&dtrace_softstate); 12882 mutex_exit(&cpu_lock); 12883 mutex_exit(&dtrace_provider_lock); 12884 mutex_exit(&dtrace_lock); 12885 return (DDI_FAILURE); 12886 } 12887 12888 ddi_report_dev(devi); 12889 dtrace_devi = devi; 12890 12891 dtrace_modload = dtrace_module_loaded; 12892 dtrace_modunload = dtrace_module_unloaded; 12893 dtrace_cpu_init = dtrace_cpu_setup_initial; 12894 dtrace_helpers_cleanup = dtrace_helpers_destroy; 12895 dtrace_helpers_fork = dtrace_helpers_duplicate; 12896 dtrace_cpustart_init = dtrace_suspend; 12897 dtrace_cpustart_fini = dtrace_resume; 12898 dtrace_debugger_init = dtrace_suspend; 12899 dtrace_debugger_fini = dtrace_resume; 12900 dtrace_kreloc_init = dtrace_suspend; 12901 dtrace_kreloc_fini = dtrace_resume; 12902 12903 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 12904 12905 ASSERT(MUTEX_HELD(&cpu_lock)); 12906 12907 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 12908 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12909 dtrace_minor = 
vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 12910 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 12911 VM_SLEEP | VMC_IDENTIFIER); 12912 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 12913 1, INT_MAX, 0); 12914 12915 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 12916 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 12917 NULL, NULL, NULL, NULL, NULL, 0); 12918 12919 ASSERT(MUTEX_HELD(&cpu_lock)); 12920 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 12921 offsetof(dtrace_probe_t, dtpr_nextmod), 12922 offsetof(dtrace_probe_t, dtpr_prevmod)); 12923 12924 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 12925 offsetof(dtrace_probe_t, dtpr_nextfunc), 12926 offsetof(dtrace_probe_t, dtpr_prevfunc)); 12927 12928 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 12929 offsetof(dtrace_probe_t, dtpr_nextname), 12930 offsetof(dtrace_probe_t, dtpr_prevname)); 12931 12932 if (dtrace_retain_max < 1) { 12933 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 12934 "setting to 1", dtrace_retain_max); 12935 dtrace_retain_max = 1; 12936 } 12937 12938 /* 12939 * Now discover our toxic ranges. 12940 */ 12941 dtrace_toxic_ranges(dtrace_toxrange_add); 12942 12943 /* 12944 * Before we register ourselves as a provider to our own framework, 12945 * we would like to assert that dtrace_provider is NULL -- but that's 12946 * not true if we were loaded as a dependency of a DTrace provider. 12947 * Once we've registered, we can assert that dtrace_provider is our 12948 * pseudo provider. 12949 */ 12950 (void) dtrace_register("dtrace", &dtrace_provider_attr, 12951 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 12952 12953 ASSERT(dtrace_provider != NULL); 12954 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 12955 12956 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 12957 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 12958 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 12959 dtrace_provider, NULL, NULL, "END", 0, NULL); 12960 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 12961 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 12962 12963 dtrace_anon_property(); 12964 mutex_exit(&cpu_lock); 12965 12966 /* 12967 * If DTrace helper tracing is enabled, we need to allocate the 12968 * trace buffer and initialize the values. 12969 */ 12970 if (dtrace_helptrace_enabled) { 12971 ASSERT(dtrace_helptrace_buffer == NULL); 12972 dtrace_helptrace_buffer = 12973 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 12974 dtrace_helptrace_next = 0; 12975 } 12976 12977 /* 12978 * If there are already providers, we must ask them to provide their 12979 * probes, and then match any anonymous enabling against them. Note 12980 * that there should be no other retained enablings at this time: 12981 * the only retained enablings at this time should be the anonymous 12982 * enabling. 12983 */ 12984 if (dtrace_anon.dta_enabling != NULL) { 12985 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 12986 12987 dtrace_enabling_provide(NULL); 12988 state = dtrace_anon.dta_state; 12989 12990 /* 12991 * We couldn't hold cpu_lock across the above call to 12992 * dtrace_enabling_provide(), but we must hold it to actually 12993 * enable the probes. We have to drop all of our locks, pick 12994 * up cpu_lock, and regain our locks before matching the 12995 * retained anonymous enabling. 
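 *
 * (cpu_lock is always acquired before dtrace_provider_lock and
 * dtrace_lock -- cf. the top of this function -- so both must be
 * dropped before cpu_lock can be taken without violating the lock
 * ordering.)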
12996 */ 12997 mutex_exit(&dtrace_lock); 12998 mutex_exit(&dtrace_provider_lock); 12999 13000 mutex_enter(&cpu_lock); 13001 mutex_enter(&dtrace_provider_lock); 13002 mutex_enter(&dtrace_lock); 13003 13004 if ((enab = dtrace_anon.dta_enabling) != NULL) 13005 (void) dtrace_enabling_match(enab, NULL); 13006 13007 mutex_exit(&cpu_lock); 13008 } 13009 13010 mutex_exit(&dtrace_lock); 13011 mutex_exit(&dtrace_provider_lock); 13012 13013 if (state != NULL) { 13014 /* 13015 * If we created any anonymous state, set it going now. 13016 */ 13017 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 13018 } 13019 13020 return (DDI_SUCCESS); 13021 } 13022 13023 /*ARGSUSED*/ 13024 static int 13025 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 13026 { 13027 dtrace_state_t *state; 13028 uint32_t priv; 13029 uid_t uid; 13030 13031 if (getminor(*devp) == DTRACEMNRN_HELPER) 13032 return (0); 13033 13034 /* 13035 * If this wasn't an open with the "helper" minor, then it must be 13036 * the "dtrace" minor. 13037 */ 13038 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 13039 13040 /* 13041 * If no DTRACE_PRIV_* bits are set in the credential, then the 13042 * caller lacks sufficient permission to do anything with DTrace. 13043 */ 13044 dtrace_cred2priv(cred_p, &priv, &uid); 13045 if (priv == DTRACE_PRIV_NONE) 13046 return (EACCES); 13047 13048 /* 13049 * Ask all providers to provide all their probes. 13050 */ 13051 mutex_enter(&dtrace_provider_lock); 13052 dtrace_probe_provide(NULL, NULL); 13053 mutex_exit(&dtrace_provider_lock); 13054 13055 mutex_enter(&cpu_lock); 13056 mutex_enter(&dtrace_lock); 13057 dtrace_opens++; 13058 dtrace_membar_producer(); 13059 13060 /* 13061 * If the kernel debugger is active (that is, if the kernel debugger 13062 * modified text in some way), we won't allow the open. 13063 */ 13064 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13065 dtrace_opens--; 13066 mutex_exit(&cpu_lock); 13067 mutex_exit(&dtrace_lock); 13068 return (EBUSY); 13069 } 13070 13071 state = dtrace_state_create(devp, cred_p); 13072 mutex_exit(&cpu_lock); 13073 13074 if (state == NULL) { 13075 if (--dtrace_opens == 0) 13076 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13077 mutex_exit(&dtrace_lock); 13078 return (EAGAIN); 13079 } 13080 13081 mutex_exit(&dtrace_lock); 13082 13083 return (0); 13084 } 13085 13086 /*ARGSUSED*/ 13087 static int 13088 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 13089 { 13090 minor_t minor = getminor(dev); 13091 dtrace_state_t *state; 13092 13093 if (minor == DTRACEMNRN_HELPER) 13094 return (0); 13095 13096 state = ddi_get_soft_state(dtrace_softstate, minor); 13097 13098 mutex_enter(&cpu_lock); 13099 mutex_enter(&dtrace_lock); 13100 13101 if (state->dts_anon) { 13102 /* 13103 * There is anonymous state. Destroy that first. 
13104 */ 13105 ASSERT(dtrace_anon.dta_state == NULL); 13106 dtrace_state_destroy(state->dts_anon); 13107 } 13108 13109 dtrace_state_destroy(state); 13110 ASSERT(dtrace_opens > 0); 13111 if (--dtrace_opens == 0) 13112 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13113 13114 mutex_exit(&dtrace_lock); 13115 mutex_exit(&cpu_lock); 13116 13117 return (0); 13118 } 13119 13120 /*ARGSUSED*/ 13121 static int 13122 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 13123 { 13124 int rval; 13125 dof_helper_t help, *dhp = NULL; 13126 13127 switch (cmd) { 13128 case DTRACEHIOC_ADDDOF: 13129 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 13130 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 13131 return (EFAULT); 13132 } 13133 13134 dhp = &help; 13135 arg = (intptr_t)help.dofhp_dof; 13136 /*FALLTHROUGH*/ 13137 13138 case DTRACEHIOC_ADD: { 13139 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 13140 13141 if (dof == NULL) 13142 return (rval); 13143 13144 mutex_enter(&dtrace_lock); 13145 dtrace_err = 0; 13146 13147 /* 13148 * dtrace_helper_slurp() takes responsibility for the dof -- 13149 * it may free it now or it may save it and free it later. 13150 */ 13151 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 13152 *rv = rval; 13153 rval = 0; 13154 } else { 13155 rval = EINVAL; 13156 } 13157 13158 mutex_exit(&dtrace_lock); 13159 return (rval); 13160 } 13161 13162 case DTRACEHIOC_REMOVE: { 13163 mutex_enter(&dtrace_lock); 13164 rval = dtrace_helper_destroygen(arg); 13165 mutex_exit(&dtrace_lock); 13166 13167 return (rval); 13168 } 13169 13170 default: 13171 break; 13172 } 13173 13174 return (ENOTTY); 13175 } 13176 13177 /*ARGSUSED*/ 13178 static int 13179 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 13180 { 13181 minor_t minor = getminor(dev); 13182 dtrace_state_t *state; 13183 int rval; 13184 13185 if (minor == DTRACEMNRN_HELPER) 13186 return (dtrace_ioctl_helper(cmd, arg, rv)); 13187 13188 state = ddi_get_soft_state(dtrace_softstate, minor); 13189 13190 if (state->dts_anon) { 13191 ASSERT(dtrace_anon.dta_state == NULL); 13192 state = state->dts_anon; 13193 } 13194 13195 switch (cmd) { 13196 case DTRACEIOC_PROVIDER: { 13197 dtrace_providerdesc_t pvd; 13198 dtrace_provider_t *pvp; 13199 13200 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 13201 return (EFAULT); 13202 13203 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 13204 mutex_enter(&dtrace_provider_lock); 13205 13206 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 13207 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 13208 break; 13209 } 13210 13211 mutex_exit(&dtrace_provider_lock); 13212 13213 if (pvp == NULL) 13214 return (ESRCH); 13215 13216 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 13217 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 13218 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 13219 return (EFAULT); 13220 13221 return (0); 13222 } 13223 13224 case DTRACEIOC_EPROBE: { 13225 dtrace_eprobedesc_t epdesc; 13226 dtrace_ecb_t *ecb; 13227 dtrace_action_t *act; 13228 void *buf; 13229 size_t size; 13230 uintptr_t dest; 13231 int nrecs; 13232 13233 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 13234 return (EFAULT); 13235 13236 mutex_enter(&dtrace_lock); 13237 13238 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 13239 mutex_exit(&dtrace_lock); 13240 return (EINVAL); 13241 } 13242 13243 if (ecb->dte_probe == NULL) { 13244 mutex_exit(&dtrace_lock); 13245 return (EINVAL); 13246 } 13247 13248 
epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 13249 epdesc.dtepd_uarg = ecb->dte_uarg; 13250 epdesc.dtepd_size = ecb->dte_size; 13251 13252 nrecs = epdesc.dtepd_nrecs; 13253 epdesc.dtepd_nrecs = 0; 13254 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13255 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13256 continue; 13257 13258 epdesc.dtepd_nrecs++; 13259 } 13260 13261 /* 13262 * Now that we have the size, we need to allocate a temporary 13263 * buffer in which to store the complete description. We need 13264 * the temporary buffer to be able to drop dtrace_lock() 13265 * across the copyout(), below. 13266 */ 13267 size = sizeof (dtrace_eprobedesc_t) + 13268 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 13269 13270 buf = kmem_alloc(size, KM_SLEEP); 13271 dest = (uintptr_t)buf; 13272 13273 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 13274 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 13275 13276 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13277 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13278 continue; 13279 13280 if (nrecs-- == 0) 13281 break; 13282 13283 bcopy(&act->dta_rec, (void *)dest, 13284 sizeof (dtrace_recdesc_t)); 13285 dest += sizeof (dtrace_recdesc_t); 13286 } 13287 13288 mutex_exit(&dtrace_lock); 13289 13290 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13291 kmem_free(buf, size); 13292 return (EFAULT); 13293 } 13294 13295 kmem_free(buf, size); 13296 return (0); 13297 } 13298 13299 case DTRACEIOC_AGGDESC: { 13300 dtrace_aggdesc_t aggdesc; 13301 dtrace_action_t *act; 13302 dtrace_aggregation_t *agg; 13303 int nrecs; 13304 uint32_t offs; 13305 dtrace_recdesc_t *lrec; 13306 void *buf; 13307 size_t size; 13308 uintptr_t dest; 13309 13310 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 13311 return (EFAULT); 13312 13313 mutex_enter(&dtrace_lock); 13314 13315 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 13316 mutex_exit(&dtrace_lock); 13317 return (EINVAL); 13318 } 13319 13320 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 13321 13322 nrecs = aggdesc.dtagd_nrecs; 13323 aggdesc.dtagd_nrecs = 0; 13324 13325 offs = agg->dtag_base; 13326 lrec = &agg->dtag_action.dta_rec; 13327 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 13328 13329 for (act = agg->dtag_first; ; act = act->dta_next) { 13330 ASSERT(act->dta_intuple || 13331 DTRACEACT_ISAGG(act->dta_kind)); 13332 aggdesc.dtagd_nrecs++; 13333 13334 if (act == &agg->dtag_action) 13335 break; 13336 } 13337 13338 /* 13339 * Now that we have the size, we need to allocate a temporary 13340 * buffer in which to store the complete description. We need 13341 * the temporary buffer to be able to drop dtrace_lock() 13342 * across the copyout(), below. 
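 *
 * (As elsewhere in this handler, dtrace_lock is never held across
 * copyout(), which may take page faults and block on user memory --
 * hence the snapshot into a temporary buffer.)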
13343 */ 13344 size = sizeof (dtrace_aggdesc_t) + 13345 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 13346 13347 buf = kmem_alloc(size, KM_SLEEP); 13348 dest = (uintptr_t)buf; 13349 13350 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 13351 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 13352 13353 for (act = agg->dtag_first; ; act = act->dta_next) { 13354 dtrace_recdesc_t rec = act->dta_rec; 13355 13356 if (nrecs-- == 0) 13357 break; 13358 13359 rec.dtrd_offset -= offs; 13360 bcopy(&rec, (void *)dest, sizeof (rec)); 13361 dest += sizeof (dtrace_recdesc_t); 13362 13363 if (act == &agg->dtag_action) 13364 break; 13365 } 13366 13367 mutex_exit(&dtrace_lock); 13368 13369 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13370 kmem_free(buf, size); 13371 return (EFAULT); 13372 } 13373 13374 kmem_free(buf, size); 13375 return (0); 13376 } 13377 13378 case DTRACEIOC_ENABLE: { 13379 dof_hdr_t *dof; 13380 dtrace_enabling_t *enab = NULL; 13381 dtrace_vstate_t *vstate; 13382 int err = 0; 13383 13384 *rv = 0; 13385 13386 /* 13387 * If a NULL argument has been passed, we take this as our 13388 * cue to reevaluate our enablings. 13389 */ 13390 if (arg == NULL) { 13391 mutex_enter(&cpu_lock); 13392 mutex_enter(&dtrace_lock); 13393 err = dtrace_enabling_matchstate(state, rv); 13394 mutex_exit(&dtrace_lock); 13395 mutex_exit(&cpu_lock); 13396 13397 return (err); 13398 } 13399 13400 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 13401 return (rval); 13402 13403 mutex_enter(&cpu_lock); 13404 mutex_enter(&dtrace_lock); 13405 vstate = &state->dts_vstate; 13406 13407 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13408 mutex_exit(&dtrace_lock); 13409 mutex_exit(&cpu_lock); 13410 dtrace_dof_destroy(dof); 13411 return (EBUSY); 13412 } 13413 13414 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 13415 mutex_exit(&dtrace_lock); 13416 mutex_exit(&cpu_lock); 13417 dtrace_dof_destroy(dof); 13418 return (EINVAL); 13419 } 13420 13421 if ((rval = dtrace_dof_options(dof, state)) != 0) { 13422 dtrace_enabling_destroy(enab); 13423 mutex_exit(&dtrace_lock); 13424 mutex_exit(&cpu_lock); 13425 dtrace_dof_destroy(dof); 13426 return (rval); 13427 } 13428 13429 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 13430 err = dtrace_enabling_retain(enab); 13431 } else { 13432 dtrace_enabling_destroy(enab); 13433 } 13434 13435 mutex_exit(&cpu_lock); 13436 mutex_exit(&dtrace_lock); 13437 dtrace_dof_destroy(dof); 13438 13439 return (err); 13440 } 13441 13442 case DTRACEIOC_REPLICATE: { 13443 dtrace_repldesc_t desc; 13444 dtrace_probedesc_t *match = &desc.dtrpd_match; 13445 dtrace_probedesc_t *create = &desc.dtrpd_create; 13446 int err; 13447 13448 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13449 return (EFAULT); 13450 13451 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13452 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13453 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13454 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13455 13456 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13457 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13458 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13459 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13460 13461 mutex_enter(&dtrace_lock); 13462 err = dtrace_enabling_replicate(state, match, create); 13463 mutex_exit(&dtrace_lock); 13464 13465 return (err); 13466 } 13467 13468 case DTRACEIOC_PROBEMATCH: 13469 case DTRACEIOC_PROBES: { 13470 dtrace_probe_t *probe = NULL; 13471 dtrace_probedesc_t desc; 13472 dtrace_probekey_t 
pkey; 13473 dtrace_id_t i; 13474 int m = 0; 13475 uint32_t priv; 13476 uid_t uid; 13477 13478 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13479 return (EFAULT); 13480 13481 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13482 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13483 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13484 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13485 13486 /* 13487 * Before we attempt to match this probe, we want to give 13488 * all providers the opportunity to provide it. 13489 */ 13490 if (desc.dtpd_id == DTRACE_IDNONE) { 13491 mutex_enter(&dtrace_provider_lock); 13492 dtrace_probe_provide(&desc, NULL); 13493 mutex_exit(&dtrace_provider_lock); 13494 desc.dtpd_id++; 13495 } 13496 13497 if (cmd == DTRACEIOC_PROBEMATCH) { 13498 dtrace_probekey(&desc, &pkey); 13499 pkey.dtpk_id = DTRACE_IDNONE; 13500 } 13501 13502 uid = crgetuid(cr); 13503 dtrace_cred2priv(cr, &priv, &uid); 13504 13505 mutex_enter(&dtrace_lock); 13506 13507 if (cmd == DTRACEIOC_PROBEMATCH) { 13508 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13509 if ((probe = dtrace_probes[i - 1]) != NULL && 13510 (m = dtrace_match_probe(probe, &pkey, 13511 priv, uid)) != 0) 13512 break; 13513 } 13514 13515 if (m < 0) { 13516 mutex_exit(&dtrace_lock); 13517 return (EINVAL); 13518 } 13519 13520 } else { 13521 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13522 if ((probe = dtrace_probes[i - 1]) != NULL && 13523 dtrace_match_priv(probe, priv, uid)) 13524 break; 13525 } 13526 } 13527 13528 if (probe == NULL) { 13529 mutex_exit(&dtrace_lock); 13530 return (ESRCH); 13531 } 13532 13533 dtrace_probe_description(probe, &desc); 13534 mutex_exit(&dtrace_lock); 13535 13536 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13537 return (EFAULT); 13538 13539 return (0); 13540 } 13541 13542 case DTRACEIOC_PROBEARG: { 13543 dtrace_argdesc_t desc; 13544 dtrace_probe_t *probe; 13545 dtrace_provider_t *prov; 13546 13547 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13548 return (EFAULT); 13549 13550 if (desc.dtargd_id == DTRACE_IDNONE) 13551 return (EINVAL); 13552 13553 if (desc.dtargd_ndx == DTRACE_ARGNONE) 13554 return (EINVAL); 13555 13556 mutex_enter(&dtrace_provider_lock); 13557 mutex_enter(&mod_lock); 13558 mutex_enter(&dtrace_lock); 13559 13560 if (desc.dtargd_id > dtrace_nprobes) { 13561 mutex_exit(&dtrace_lock); 13562 mutex_exit(&mod_lock); 13563 mutex_exit(&dtrace_provider_lock); 13564 return (EINVAL); 13565 } 13566 13567 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 13568 mutex_exit(&dtrace_lock); 13569 mutex_exit(&mod_lock); 13570 mutex_exit(&dtrace_provider_lock); 13571 return (EINVAL); 13572 } 13573 13574 mutex_exit(&dtrace_lock); 13575 13576 prov = probe->dtpr_provider; 13577 13578 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 13579 /* 13580 * There isn't any typed information for this probe. 13581 * Set the argument number to DTRACE_ARGNONE. 
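 *
 * (When the provider does implement dtps_getargdesc(), the else
 * branch below presets dtargd_mapping to the requested index and
 * clears the type strings; the provider may then remap the index and
 * fill in the native and translated types.)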
13582 */ 13583 desc.dtargd_ndx = DTRACE_ARGNONE; 13584 } else { 13585 desc.dtargd_native[0] = '\0'; 13586 desc.dtargd_xlate[0] = '\0'; 13587 desc.dtargd_mapping = desc.dtargd_ndx; 13588 13589 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 13590 probe->dtpr_id, probe->dtpr_arg, &desc); 13591 } 13592 13593 mutex_exit(&mod_lock); 13594 mutex_exit(&dtrace_provider_lock); 13595 13596 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13597 return (EFAULT); 13598 13599 return (0); 13600 } 13601 13602 case DTRACEIOC_GO: { 13603 processorid_t cpuid; 13604 rval = dtrace_state_go(state, &cpuid); 13605 13606 if (rval != 0) 13607 return (rval); 13608 13609 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 13610 return (EFAULT); 13611 13612 return (0); 13613 } 13614 13615 case DTRACEIOC_STOP: { 13616 processorid_t cpuid; 13617 13618 mutex_enter(&dtrace_lock); 13619 rval = dtrace_state_stop(state, &cpuid); 13620 mutex_exit(&dtrace_lock); 13621 13622 if (rval != 0) 13623 return (rval); 13624 13625 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 13626 return (EFAULT); 13627 13628 return (0); 13629 } 13630 13631 case DTRACEIOC_DOFGET: { 13632 dof_hdr_t hdr, *dof; 13633 uint64_t len; 13634 13635 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 13636 return (EFAULT); 13637 13638 mutex_enter(&dtrace_lock); 13639 dof = dtrace_dof_create(state); 13640 mutex_exit(&dtrace_lock); 13641 13642 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 13643 rval = copyout(dof, (void *)arg, len); 13644 dtrace_dof_destroy(dof); 13645 13646 return (rval == 0 ? 0 : EFAULT); 13647 } 13648 13649 case DTRACEIOC_AGGSNAP: 13650 case DTRACEIOC_BUFSNAP: { 13651 dtrace_bufdesc_t desc; 13652 caddr_t cached; 13653 dtrace_buffer_t *buf; 13654 13655 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13656 return (EFAULT); 13657 13658 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 13659 return (EINVAL); 13660 13661 mutex_enter(&dtrace_lock); 13662 13663 if (cmd == DTRACEIOC_BUFSNAP) { 13664 buf = &state->dts_buffer[desc.dtbd_cpu]; 13665 } else { 13666 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 13667 } 13668 13669 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 13670 size_t sz = buf->dtb_offset; 13671 13672 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 13673 mutex_exit(&dtrace_lock); 13674 return (EBUSY); 13675 } 13676 13677 /* 13678 * If this buffer has already been consumed, we're 13679 * going to indicate that there's nothing left here 13680 * to consume. 13681 */ 13682 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 13683 mutex_exit(&dtrace_lock); 13684 13685 desc.dtbd_size = 0; 13686 desc.dtbd_drops = 0; 13687 desc.dtbd_errors = 0; 13688 desc.dtbd_oldest = 0; 13689 sz = sizeof (desc); 13690 13691 if (copyout(&desc, (void *)arg, sz) != 0) 13692 return (EFAULT); 13693 13694 return (0); 13695 } 13696 13697 /* 13698 * If this is a ring buffer that has wrapped, we want 13699 * to copy the whole thing out. 
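 *
 * (From the consumer side, the snapshot protocol is roughly the
 * following sketch, in which fd is an open /dev/dtrace descriptor and
 * cpu, buf and consume() are placeholders:
 *
 *	dtrace_bufdesc_t desc;
 *	desc.dtbd_cpu = cpu;
 *	desc.dtbd_data = buf;
 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == 0)
 *		consume(desc.dtbd_data, desc.dtbd_size);
 *
 * libdtrace performs the moral equivalent of this on each consumer
 * wakeup.)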
13700 */ 13701 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 13702 dtrace_buffer_polish(buf); 13703 sz = buf->dtb_size; 13704 } 13705 13706 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 13707 mutex_exit(&dtrace_lock); 13708 return (EFAULT); 13709 } 13710 13711 desc.dtbd_size = sz; 13712 desc.dtbd_drops = buf->dtb_drops; 13713 desc.dtbd_errors = buf->dtb_errors; 13714 desc.dtbd_oldest = buf->dtb_xamot_offset; 13715 13716 mutex_exit(&dtrace_lock); 13717 13718 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13719 return (EFAULT); 13720 13721 buf->dtb_flags |= DTRACEBUF_CONSUMED; 13722 13723 return (0); 13724 } 13725 13726 if (buf->dtb_tomax == NULL) { 13727 ASSERT(buf->dtb_xamot == NULL); 13728 mutex_exit(&dtrace_lock); 13729 return (ENOENT); 13730 } 13731 13732 cached = buf->dtb_tomax; 13733 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 13734 13735 dtrace_xcall(desc.dtbd_cpu, 13736 (dtrace_xcall_t)dtrace_buffer_switch, buf); 13737 13738 state->dts_errors += buf->dtb_xamot_errors; 13739 13740 /* 13741 * If the buffers did not actually switch, then the cross call 13742 * did not take place -- presumably because the given CPU is 13743 * not in the ready set. If this is the case, we'll return 13744 * ENOENT. 13745 */ 13746 if (buf->dtb_tomax == cached) { 13747 ASSERT(buf->dtb_xamot != cached); 13748 mutex_exit(&dtrace_lock); 13749 return (ENOENT); 13750 } 13751 13752 ASSERT(cached == buf->dtb_xamot); 13753 13754 /* 13755 * We have our snapshot; now copy it out. 13756 */ 13757 if (copyout(buf->dtb_xamot, desc.dtbd_data, 13758 buf->dtb_xamot_offset) != 0) { 13759 mutex_exit(&dtrace_lock); 13760 return (EFAULT); 13761 } 13762 13763 desc.dtbd_size = buf->dtb_xamot_offset; 13764 desc.dtbd_drops = buf->dtb_xamot_drops; 13765 desc.dtbd_errors = buf->dtb_xamot_errors; 13766 desc.dtbd_oldest = 0; 13767 13768 mutex_exit(&dtrace_lock); 13769 13770 /* 13771 * Finally, copy out the buffer description. 13772 */ 13773 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13774 return (EFAULT); 13775 13776 return (0); 13777 } 13778 13779 case DTRACEIOC_CONF: { 13780 dtrace_conf_t conf; 13781 13782 bzero(&conf, sizeof (conf)); 13783 conf.dtc_difversion = DIF_VERSION; 13784 conf.dtc_difintregs = DIF_DIR_NREGS; 13785 conf.dtc_diftupregs = DIF_DTR_NREGS; 13786 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 13787 13788 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 13789 return (EFAULT); 13790 13791 return (0); 13792 } 13793 13794 case DTRACEIOC_STATUS: { 13795 dtrace_status_t stat; 13796 dtrace_dstate_t *dstate; 13797 int i, j; 13798 uint64_t nerrs; 13799 13800 /* 13801 * See the comment in dtrace_state_deadman() for the reason 13802 * for setting dts_laststatus to INT64_MAX before setting 13803 * it to the correct value. 
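 *
 * (Briefly: the 64-bit store is not guaranteed to be atomic, so
 * dts_laststatus is first set to a value that can never appear stale,
 * a producer barrier is issued, and only then is the current time
 * stored.)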
13804 */ 13805 state->dts_laststatus = INT64_MAX; 13806 dtrace_membar_producer(); 13807 state->dts_laststatus = dtrace_gethrtime(); 13808 13809 bzero(&stat, sizeof (stat)); 13810 13811 mutex_enter(&dtrace_lock); 13812 13813 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 13814 mutex_exit(&dtrace_lock); 13815 return (ENOENT); 13816 } 13817 13818 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 13819 stat.dtst_exiting = 1; 13820 13821 nerrs = state->dts_errors; 13822 dstate = &state->dts_vstate.dtvs_dynvars; 13823 13824 for (i = 0; i < NCPU; i++) { 13825 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 13826 13827 stat.dtst_dyndrops += dcpu->dtdsc_drops; 13828 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 13829 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 13830 13831 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 13832 stat.dtst_filled++; 13833 13834 nerrs += state->dts_buffer[i].dtb_errors; 13835 13836 for (j = 0; j < state->dts_nspeculations; j++) { 13837 dtrace_speculation_t *spec; 13838 dtrace_buffer_t *buf; 13839 13840 spec = &state->dts_speculations[j]; 13841 buf = &spec->dtsp_buffer[i]; 13842 stat.dtst_specdrops += buf->dtb_xamot_drops; 13843 } 13844 } 13845 13846 stat.dtst_specdrops_busy = state->dts_speculations_busy; 13847 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 13848 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 13849 stat.dtst_dblerrors = state->dts_dblerrors; 13850 stat.dtst_killed = 13851 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 13852 stat.dtst_errors = nerrs; 13853 13854 mutex_exit(&dtrace_lock); 13855 13856 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 13857 return (EFAULT); 13858 13859 return (0); 13860 } 13861 13862 case DTRACEIOC_FORMAT: { 13863 dtrace_fmtdesc_t fmt; 13864 char *str; 13865 int len; 13866 13867 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 13868 return (EFAULT); 13869 13870 mutex_enter(&dtrace_lock); 13871 13872 if (fmt.dtfd_format == 0 || 13873 fmt.dtfd_format > state->dts_nformats) { 13874 mutex_exit(&dtrace_lock); 13875 return (EINVAL); 13876 } 13877 13878 /* 13879 * Format strings are allocated contiguously and they are 13880 * never freed; if a format index is less than the number 13881 * of formats, we can assert that the format map is non-NULL 13882 * and that the format for the specified index is non-NULL. 
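 *
 * (The code below also implements a two-call sizing protocol: if
 * dtfd_length is too small, the descriptor is copied back out with
 * dtfd_length set to the required size and no string is transferred.
 * A consumer can thus size its buffer along these lines, where fd and
 * id are placeholders and error handling is elided:
 *
 *	dtrace_fmtdesc_t fmt = { 0 };
 *	fmt.dtfd_format = id;
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
 *
 * This is roughly what libdtrace does when it retrieves format
 * strings.)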
13883 */ 13884 ASSERT(state->dts_formats != NULL); 13885 str = state->dts_formats[fmt.dtfd_format - 1]; 13886 ASSERT(str != NULL); 13887 13888 len = strlen(str) + 1; 13889 13890 if (len > fmt.dtfd_length) { 13891 fmt.dtfd_length = len; 13892 13893 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 13894 mutex_exit(&dtrace_lock); 13895 return (EINVAL); 13896 } 13897 } else { 13898 if (copyout(str, fmt.dtfd_string, len) != 0) { 13899 mutex_exit(&dtrace_lock); 13900 return (EINVAL); 13901 } 13902 } 13903 13904 mutex_exit(&dtrace_lock); 13905 return (0); 13906 } 13907 13908 default: 13909 break; 13910 } 13911 13912 return (ENOTTY); 13913 } 13914 13915 /*ARGSUSED*/ 13916 static int 13917 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 13918 { 13919 dtrace_state_t *state; 13920 13921 switch (cmd) { 13922 case DDI_DETACH: 13923 break; 13924 13925 case DDI_SUSPEND: 13926 return (DDI_SUCCESS); 13927 13928 default: 13929 return (DDI_FAILURE); 13930 } 13931 13932 mutex_enter(&cpu_lock); 13933 mutex_enter(&dtrace_provider_lock); 13934 mutex_enter(&dtrace_lock); 13935 13936 ASSERT(dtrace_opens == 0); 13937 13938 if (dtrace_helpers > 0) { 13939 mutex_exit(&dtrace_provider_lock); 13940 mutex_exit(&dtrace_lock); 13941 mutex_exit(&cpu_lock); 13942 return (DDI_FAILURE); 13943 } 13944 13945 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 13946 mutex_exit(&dtrace_provider_lock); 13947 mutex_exit(&dtrace_lock); 13948 mutex_exit(&cpu_lock); 13949 return (DDI_FAILURE); 13950 } 13951 13952 dtrace_provider = NULL; 13953 13954 if ((state = dtrace_anon_grab()) != NULL) { 13955 /* 13956 * If there were ECBs on this state, the provider should 13957 * not have been allowed to detach; assert that there are 13958 * none. 13959 */ 13960 ASSERT(state->dts_necbs == 0); 13961 dtrace_state_destroy(state); 13962 13963 /* 13964 * If we're being detached with anonymous state, we need to 13965 * indicate to the kernel debugger that DTrace is now inactive.
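 *
 * (The activate half of this handshake is in dtrace_open(); the last
 * close normally deactivates, but anonymous state has no associated
 * open, hence the explicit deactivation here.)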
13966 */ 13967 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13968 } 13969 13970 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 13971 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13972 dtrace_cpu_init = NULL; 13973 dtrace_helpers_cleanup = NULL; 13974 dtrace_helpers_fork = NULL; 13975 dtrace_cpustart_init = NULL; 13976 dtrace_cpustart_fini = NULL; 13977 dtrace_debugger_init = NULL; 13978 dtrace_debugger_fini = NULL; 13979 dtrace_kreloc_init = NULL; 13980 dtrace_kreloc_fini = NULL; 13981 dtrace_modload = NULL; 13982 dtrace_modunload = NULL; 13983 13984 mutex_exit(&cpu_lock); 13985 13986 if (dtrace_helptrace_enabled) { 13987 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 13988 dtrace_helptrace_buffer = NULL; 13989 } 13990 13991 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 13992 dtrace_probes = NULL; 13993 dtrace_nprobes = 0; 13994 13995 dtrace_hash_destroy(dtrace_bymod); 13996 dtrace_hash_destroy(dtrace_byfunc); 13997 dtrace_hash_destroy(dtrace_byname); 13998 dtrace_bymod = NULL; 13999 dtrace_byfunc = NULL; 14000 dtrace_byname = NULL; 14001 14002 kmem_cache_destroy(dtrace_state_cache); 14003 vmem_destroy(dtrace_minor); 14004 vmem_destroy(dtrace_arena); 14005 14006 if (dtrace_toxrange != NULL) { 14007 kmem_free(dtrace_toxrange, 14008 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 14009 dtrace_toxrange = NULL; 14010 dtrace_toxranges = 0; 14011 dtrace_toxranges_max = 0; 14012 } 14013 14014 ddi_remove_minor_node(dtrace_devi, NULL); 14015 dtrace_devi = NULL; 14016 14017 ddi_soft_state_fini(&dtrace_softstate); 14018 14019 ASSERT(dtrace_vtime_references == 0); 14020 ASSERT(dtrace_opens == 0); 14021 ASSERT(dtrace_retained == NULL); 14022 14023 mutex_exit(&dtrace_lock); 14024 mutex_exit(&dtrace_provider_lock); 14025 14026 /* 14027 * We don't destroy the task queue until after we have dropped our 14028 * locks (taskq_destroy() may block on running tasks). To prevent 14029 * attempting to do work after we have effectively detached but before 14030 * the task queue has been destroyed, all tasks dispatched via the 14031 * task queue must check that DTrace is still attached before 14032 * performing any operation. 
14033 */ 14034 taskq_destroy(dtrace_taskq); 14035 dtrace_taskq = NULL; 14036 14037 return (DDI_SUCCESS); 14038 } 14039 14040 /*ARGSUSED*/ 14041 static int 14042 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 14043 { 14044 int error; 14045 14046 switch (infocmd) { 14047 case DDI_INFO_DEVT2DEVINFO: 14048 *result = (void *)dtrace_devi; 14049 error = DDI_SUCCESS; 14050 break; 14051 case DDI_INFO_DEVT2INSTANCE: 14052 *result = (void *)0; 14053 error = DDI_SUCCESS; 14054 break; 14055 default: 14056 error = DDI_FAILURE; 14057 } 14058 return (error); 14059 } 14060 14061 static struct cb_ops dtrace_cb_ops = { 14062 dtrace_open, /* open */ 14063 dtrace_close, /* close */ 14064 nulldev, /* strategy */ 14065 nulldev, /* print */ 14066 nodev, /* dump */ 14067 nodev, /* read */ 14068 nodev, /* write */ 14069 dtrace_ioctl, /* ioctl */ 14070 nodev, /* devmap */ 14071 nodev, /* mmap */ 14072 nodev, /* segmap */ 14073 nochpoll, /* poll */ 14074 ddi_prop_op, /* cb_prop_op */ 14075 0, /* streamtab */ 14076 D_NEW | D_MP /* Driver compatibility flag */ 14077 }; 14078 14079 static struct dev_ops dtrace_ops = { 14080 DEVO_REV, /* devo_rev */ 14081 0, /* refcnt */ 14082 dtrace_info, /* get_dev_info */ 14083 nulldev, /* identify */ 14084 nulldev, /* probe */ 14085 dtrace_attach, /* attach */ 14086 dtrace_detach, /* detach */ 14087 nodev, /* reset */ 14088 &dtrace_cb_ops, /* driver operations */ 14089 NULL, /* bus operations */ 14090 nodev /* dev power */ 14091 }; 14092 14093 static struct modldrv modldrv = { 14094 &mod_driverops, /* module type (this is a pseudo driver) */ 14095 "Dynamic Tracing", /* name of module */ 14096 &dtrace_ops, /* driver ops */ 14097 }; 14098 14099 static struct modlinkage modlinkage = { 14100 MODREV_1, 14101 (void *)&modldrv, 14102 NULL 14103 }; 14104 14105 int 14106 _init(void) 14107 { 14108 return (mod_install(&modlinkage)); 14109 } 14110 14111 int 14112 _info(struct modinfo *modinfop) 14113 { 14114 return (mod_info(&modlinkage, modinfop)); 14115 } 14116 14117 int 14118 _fini(void) 14119 { 14120 return (mod_remove(&modlinkage)); 14121 } 14122