/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, all DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is deliberately made available: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
static dev_info_t	*dtrace_devi;		/* device info */
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static void		*dtrace_softstate;	/* softstate pointer */
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static int		dtrace_double_errors;	/* ERRORs inducing error */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_state_t	*dtrace_state;		/* temporary variable */
static int		dtrace_error;		/* temporary variable */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  cpu_lock
 * continues its historical role as a coarse-grained lock; it is acquired
 * before both dtrace_provider_lock and dtrace_lock.  mod_lock is slightly
 * stranger:  it must be acquired _between_ dtrace_provider_lock and
 * dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
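
/*
 * A minimal sketch (illustrative only, not part of the framework) of the
 * acquisition order documented above; a real caller would take only the
 * locks it needs, but never in a different relative order.
 */
#if 0
	mutex_enter(&cpu_lock);			/* coarsest; always first */
	mutex_enter(&dtrace_meta_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* finest; always last */

	/* ... manipulate DTrace state ... */

	mutex_exit(&dtrace_lock);		/* release in reverse order */
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&dtrace_meta_lock);
	mutex_exit(&cpu_lock);
#endif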
/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif
/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif
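
/*
 * A minimal sketch (illustrative only; these helpers are hypothetical and
 * not part of the framework) of the key layout that DTRACE_TLS_THRKEY
 * produces: the interrupt nesting component occupies the top 3 bits, and
 * the biased t_did occupies the low 61 bits.
 */
#if 0
static uint64_t
dtrace_tls_thrkey_intr(uint64_t key)
{
	return (key >> 61);				/* 3-bit interrupt component */
}

static uint64_t
dtrace_tls_thrkey_did(uint64_t key)
{
	return (key & (((uint64_t)1 << 61) - 1));	/* biased t_did */
}
#endif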
#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (rval);							\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}
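
/*
 * Illustrative only: probe context must use the safe dtrace_load*()
 * variants rather than raw dereferences; a raw load of an unmapped or
 * toxic address would take a fault that the framework cannot absorb.
 */
#if 0
	uint32_t val = dtrace_load32(addr);	/* safe: faults set CPU flags */
	uint32_t bad = *(uint32_t *)addr;	/* unsafe in probe context */
#endif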
/*
 * Use the DTRACE_LOADFUNC macro to define functions for loading a uint8_t,
 * a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (addr - svar->dtsv_data < svar->dtsv_size &&
		    addr + sz <= svar->dtsv_data + svar->dtsv_size)
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	uintptr_t a;
	size_t s;

	/*
	 * First, check to see if the address is in scratch space...
	 */
	a = mstate->dtms_scratch_base;
	s = mstate->dtms_scratch_size;

	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	a = (uintptr_t)vstate->dtvs_dynvars.dtds_base;
	s = vstate->dtvs_dynvars.dtds_size;
	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}
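
/*
 * Illustrative only: the range checks in dtrace_canstore() and
 * dtrace_canstore_statvar() above rely on unsigned wraparound -- the single
 * comparison "addr - base < size" tests base <= addr < base + size, because
 * an addr below base wraps around to a very large unsigned value.
 */
#if 0
	/* one unsigned comparison, equivalent to two signed ones */
	int ok = (addr - base < size);
#endif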
/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}
/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

/*
 * These privilege checks should be used by actions and subroutines to
 * verify the credentials of the process that enabled the invoking ECB.
 */
static int
dtrace_priv_proc_common(dtrace_state_t *state)
{
	uid_t uid = state->dts_cred.dcr_uid;
	gid_t gid = state->dts_cred.dcr_gid;
	cred_t *cr;
	proc_t *proc;

	if ((cr = CRED()) != NULL &&
	    uid == cr->cr_uid &&
	    uid == cr->cr_ruid &&
	    uid == cr->cr_suid &&
	    gid == cr->cr_gid &&
	    gid == cr->cr_rgid &&
	    gid == cr->cr_sgid &&
	    (proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_DESTRUCTIVE)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
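
/*
 * Illustrative only: an action implementation that performs a destructive
 * operation on the traced process would gate itself on the check above; a
 * failed check has already set the appropriate CPU flag, so the caller
 * simply declines to act.
 */
#if 0
	if (!dtrace_priv_proc_destructive(state))
		return;
#endif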
/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates, or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op)
{
	uint64_t hashval = 1;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = CPU->cpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^32) that our hashval
	 * comes out to be 0.  We rely on a zero hashval denoting a free
	 * element; if this actually happens, we set the hashval to 1.
	 */
	if (hashval == 0)
		hashval = 1;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start == NULL || start->dtdv_hashval != 0 ||
	    op != DTRACE_DYNVAR_DEALLOC);

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == 0) {
				/*
				 * We've gone off the rails.  Somewhere
				 * along the line, one of the members of this
				 * hash chain was deleted.  We could assert
				 * that either the dirty list or the rinsing
				 * list is non-NULL.  (The dtrace_sync() in
				 * dtrace_dynvar_clean() would validate this
				 * assertion.)
				 */
				ASSERT(op != DTRACE_DYNVAR_DEALLOC);
				goto top;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != 0);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now clear the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = 0;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == 0);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == 0);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = 0;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
}
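
/*
 * Illustrative only (the real callers live in the DIF emulator, not shown
 * in this section): a load of a dynamic variable would use
 * DTRACE_DYNVAR_NOALLOC, a store would use DTRACE_DYNVAR_ALLOC, and a store
 * of zero would use DTRACE_DYNVAR_DEALLOC to free the variable.
 */
#if 0
	dvar = dtrace_dynvar(dstate, nkeys, key, dsize,
	    rval != 0 ? DTRACE_DYNVAR_ALLOC : DTRACE_DYNVAR_DEALLOC);
#endif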
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval)
{
	if (nval < *oval)
		*oval = nval;
}

static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i]++;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1]++;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1]++;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0]++;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1]++;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1]++;
}

static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval)
{
	*oval += nval;
}
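
/*
 * A minimal sketch (hypothetical helper, not part of the framework) of the
 * bucket indexing that dtrace_aggregate_lquantize() performs above:
 * lquanta[0] counts underflow, lquanta[1 .. levels] count each step, and
 * lquanta[levels + 1] counts overflow.
 */
#if 0
static int
dtrace_lquantize_bucket(int32_t val, int32_t base, uint16_t step,
    uint16_t levels)
{
	if (val < base)
		return (0);				/* underflow */

	if ((val - base) / step < levels)
		return ((val - base) / step + 1);	/* in range */

	return (levels + 1);				/* overflow */
}
#endif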
/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	uintptr_t offs;

	if (buf == NULL)
		return;

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	/*
	 * Calculate the hash value based on the key.  Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm:  a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice.  The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
	 */
	for (i = sizeof (dtrace_aggid_t); i < size; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * Yes, the divide here is expensive.  If the cycle count here becomes
	 * prohibitive, we can do tricks to eliminate it.
	 */
	ndx = hashval % agb->dtagb_hashsize;

	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
		ASSERT((caddr_t)key >= tomax);
		ASSERT((caddr_t)key < tomax + buf->dtb_size);

		if (hashval != key->dtak_hashval || key->dtak_size != size)
			continue;

		kdata = key->dtak_data;
		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);

		for (i = sizeof (dtrace_aggid_t); i < size; i++) {
			if (kdata[i] != data[i])
				goto next;
		}

		if (action != key->dtak_action) {
			/*
			 * We are aggregating on the same value in the same
			 * aggregation with two different aggregating actions.
			 * (This should have been picked up in the compiler,
			 * so we may be dealing with errant or devious DIF.)
			 * This is an error condition; we indicate as much,
			 * and return.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return;
		}

		/*
		 * This is a hit:  we need to apply the aggregator to
		 * the value at this key.
		 */
		agg->dtag_aggregate((uint64_t *)(kdata + size), arg);
		return;
next:
		continue;
	}

	/*
	 * We didn't find it.  We need to allocate some zero-filled space,
	 * link it into the hash table appropriately, and apply the aggregator
	 * to the (zero-filled) value.
	 */
	offs = buf->dtb_offset;
	while (offs & align)
		offs += sizeof (uint32_t);

	/*
	 * If we don't have enough room to both allocate a new key _and_
	 * its associated data, increment the drop count and return.
	 */
	if ((uintptr_t)tomax + offs + fsize >
	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*CONSTCOND*/
	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
	agb->dtagb_free -= sizeof (dtrace_aggkey_t);

	key->dtak_data = kdata = tomax + offs;
	buf->dtb_offset = offs + fsize;

	/*
	 * Now copy the data across.
	 */
	*((dtrace_aggid_t *)kdata) = agg->dtag_id;

	for (i = sizeof (dtrace_aggid_t); i < size; i++)
		kdata[i] = data[i];

	for (i = size; i < fsize; i++)
		kdata[i] = 0;

	key->dtak_hashval = hashval;
	key->dtak_size = size;
	key->dtak_action = action;
	key->dtak_next = agb->dtagb_hash[ndx];
	agb->dtagb_hash[ndx] = key;

	/*
	 * Finally, apply the aggregator.
	 */
	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), arg);
}
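
/*
 * A minimal sketch (hypothetical function, not part of the framework) of
 * the "One-at-a-time" hash used -- in byte and 16-bit-chunk variants -- by
 * dtrace_aggregate() and dtrace_dynvar() above, written out over a plain
 * byte buffer.
 */
#if 0
static uint32_t
dtrace_oneatatime(const uint8_t *data, size_t len)
{
	uint32_t hashval = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	return (hashval);
}
#endif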
/*
 * Given consumer state, this routine finds a speculation in the INACTIVE
 * state and transitions it into the ACTIVE state.  If there is no speculation
 * in the INACTIVE state, 0 is returned.  In this case, no error counter is
 * incremented -- it is up to the caller to take appropriate action.
 */
static int
dtrace_speculation(dtrace_state_t *state)
{
	int i = 0;
	dtrace_speculation_state_t current;
	uint32_t *stat = &state->dts_speculations_unavail, count;

	while (i < state->dts_nspeculations) {
		dtrace_speculation_t *spec = &state->dts_speculations[i];

		current = spec->dtsp_state;

		if (current != DTRACESPEC_INACTIVE) {
			if (current == DTRACESPEC_COMMITTINGMANY ||
			    current == DTRACESPEC_COMMITTING ||
			    current == DTRACESPEC_DISCARDING)
				stat = &state->dts_speculations_busy;
			i++;
			continue;
		}

		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    current, DTRACESPEC_ACTIVE) == current)
			return (i + 1);
	}

	/*
	 * We couldn't find a speculation.  If we found as much as a single
	 * busy speculation buffer, we'll attribute this failure as "busy"
	 * instead of "unavail".
	 */
	do {
		count = *stat;
	} while (dtrace_cas32(stat, count, count + 1) != count);

	return (0);
}

/*
 * This routine commits an active speculation.  If the specified speculation
 * is not in a valid state to perform a commit(), this routine will silently do
 * nothing.  The state of the specified speculation is transitioned according
 * to the state transition diagram outlined in <sys/dtrace_impl.h>
 */
static void
dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_buffer_t *src, *dest;
	uintptr_t daddr, saddr, dlimit;
	dtrace_speculation_state_t current, new;
	intptr_t offs;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	src = &spec->dtsp_buffer[cpu];
	dest = &state->dts_buffer[cpu];

	do {
		current = spec->dtsp_state;

		if (current == DTRACESPEC_COMMITTINGMANY)
			break;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_COMMITTING:
			/*
			 * This is only possible if we are (a) commit()'ing
			 * without having done a prior speculate() on this CPU
			 * and (b) racing with another commit() on a different
			 * CPU.  There's nothing to do -- we just assert that
			 * our offset is 0.
			 */
			ASSERT(src->dtb_offset == 0);
			return;

		case DTRACESPEC_ACTIVE:
			new = DTRACESPEC_COMMITTING;
			break;

		case DTRACESPEC_ACTIVEONE:
			/*
			 * This speculation is active on one CPU.  If our
			 * buffer offset is non-zero, we know that the one CPU
			 * must be us.  Otherwise, we are committing on a
			 * different CPU from the speculate(), and we must
			 * rely on being asynchronously cleaned.
			 */
			if (src->dtb_offset != 0) {
				new = DTRACESPEC_COMMITTING;
				break;
			}
			/*FALLTHROUGH*/

		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_COMMITTINGMANY;
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	/*
	 * We have set the state to indicate that we are committing this
	 * speculation.  Now reserve the necessary space in the destination
	 * buffer.
	 */
	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
	    sizeof (uint64_t), state, NULL)) < 0) {
		dtrace_buffer_drop(dest);
		goto out;
	}

	/*
	 * We have the space; copy the buffer across.  (Note that this is a
	 * highly suboptimal bcopy(); in the unlikely event that this becomes
	 * a serious performance issue, a high-performance DTrace-specific
	 * bcopy() should obviously be invented.)
	 */
	daddr = (uintptr_t)dest->dtb_tomax + offs;
	dlimit = daddr + src->dtb_offset;
	saddr = (uintptr_t)src->dtb_tomax;

	/*
	 * First, the aligned portion.
	 */
	while (dlimit - daddr >= sizeof (uint64_t)) {
		*((uint64_t *)daddr) = *((uint64_t *)saddr);

		daddr += sizeof (uint64_t);
		saddr += sizeof (uint64_t);
	}

	/*
	 * Now any left-over bit...
	 */
	while (dlimit - daddr)
		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);

	/*
	 * Finally, commit the reserved space in the destination buffer.
	 */
	dest->dtb_offset = offs + src->dtb_offset;

out:
	/*
	 * If we're lucky enough to be the only active CPU on this speculation
	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
	 */
	if (current == DTRACESPEC_ACTIVE ||
	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);

		ASSERT(rval == DTRACESPEC_COMMITTING);
	}

	src->dtb_offset = 0;
	src->dtb_xamot_drops += src->dtb_drops;
	src->dtb_drops = 0;
}

/*
 * This routine discards an active speculation.  If the specified speculation
 * is not in a valid state to perform a discard(), this routine will silently
 * do nothing.  The state of the specified speculation is transitioned
 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
 */
static void
dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_speculation_state_t current, new;
	dtrace_buffer_t *buf;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	buf = &spec->dtsp_buffer[cpu];

	do {
		current = spec->dtsp_state;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_COMMITTINGMANY:
		case DTRACESPEC_COMMITTING:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_ACTIVE:
		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_DISCARDING;
			break;

		case DTRACESPEC_ACTIVEONE:
			if (buf->dtb_offset != 0) {
				new = DTRACESPEC_INACTIVE;
			} else {
				new = DTRACESPEC_DISCARDING;
			}
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	buf->dtb_offset = 0;
	buf->dtb_drops = 0;
}
1859 */ 1860 static void 1861 dtrace_speculation_clean_here(dtrace_state_t *state) 1862 { 1863 dtrace_icookie_t cookie; 1864 processorid_t cpu = CPU->cpu_id; 1865 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 1866 dtrace_specid_t i; 1867 1868 cookie = dtrace_interrupt_disable(); 1869 1870 if (dest->dtb_tomax == NULL) { 1871 dtrace_interrupt_enable(cookie); 1872 return; 1873 } 1874 1875 for (i = 0; i < state->dts_nspeculations; i++) { 1876 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1877 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 1878 1879 if (src->dtb_tomax == NULL) 1880 continue; 1881 1882 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 1883 src->dtb_offset = 0; 1884 continue; 1885 } 1886 1887 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1888 continue; 1889 1890 if (src->dtb_offset == 0) 1891 continue; 1892 1893 dtrace_speculation_commit(state, cpu, i + 1); 1894 } 1895 1896 dtrace_interrupt_enable(cookie); 1897 } 1898 1899 /* 1900 * Note: not called from probe context. This function is called 1901 * asynchronously (and at a regular interval) to clean any speculations that 1902 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 1903 * is work to be done, it cross calls all CPUs to perform that work; 1904 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to 1905 * the INACTIVE state until they have been cleaned by all CPUs. 1906 */ 1907 static void 1908 dtrace_speculation_clean(dtrace_state_t *state) 1909 { 1910 int work = 0, rv; 1911 dtrace_specid_t i; 1912 1913 for (i = 0; i < state->dts_nspeculations; i++) { 1914 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1915 1916 ASSERT(!spec->dtsp_cleaning); 1917 1918 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 1919 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1920 continue; 1921 1922 work++; 1923 spec->dtsp_cleaning = 1; 1924 } 1925 1926 if (!work) 1927 return; 1928 1929 dtrace_xcall(DTRACE_CPUALL, 1930 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 1931 1932 /* 1933 * We now know that all CPUs have committed or discarded their 1934 * speculation buffers, as appropriate. We can now set the state 1935 * to inactive. 1936 */ 1937 for (i = 0; i < state->dts_nspeculations; i++) { 1938 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1939 dtrace_speculation_state_t current, new; 1940 1941 if (!spec->dtsp_cleaning) 1942 continue; 1943 1944 current = spec->dtsp_state; 1945 ASSERT(current == DTRACESPEC_DISCARDING || 1946 current == DTRACESPEC_COMMITTINGMANY); 1947 1948 new = DTRACESPEC_INACTIVE; 1949 1950 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 1951 ASSERT(rv == current); 1952 spec->dtsp_cleaning = 0; 1953 } 1954 } 1955 1956 /* 1957 * Called as part of a speculate() to get the speculative buffer associated 1958 * with a given speculation. Returns NULL if the specified speculation is not 1959 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 1960 * the active CPU is not the specified CPU -- the speculation will be 1961 * atomically transitioned into the ACTIVEMANY state.
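 *
 * (Summarizing the speculate-side transitions below: ACTIVE becomes
 * ACTIVEONE on the first speculating CPU, ACTIVEONE becomes ACTIVEMANY
 * when a second CPU joins, ACTIVEONE and ACTIVEMANY are otherwise left
 * as they are, and INACTIVE, COMMITTING, COMMITTINGMANY and DISCARDING
 * all yield NULL -- and therefore no speculative tracing on this CPU.)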
1962 */ 1963 static dtrace_buffer_t * 1964 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 1965 dtrace_specid_t which) 1966 { 1967 dtrace_speculation_t *spec; 1968 dtrace_speculation_state_t current, new; 1969 dtrace_buffer_t *buf; 1970 1971 if (which == 0) 1972 return (NULL); 1973 1974 if (which > state->dts_nspeculations) { 1975 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 1976 return (NULL); 1977 } 1978 1979 spec = &state->dts_speculations[which - 1]; 1980 buf = &spec->dtsp_buffer[cpuid]; 1981 1982 do { 1983 current = spec->dtsp_state; 1984 1985 switch (current) { 1986 case DTRACESPEC_INACTIVE: 1987 case DTRACESPEC_COMMITTINGMANY: 1988 case DTRACESPEC_DISCARDING: 1989 return (NULL); 1990 1991 case DTRACESPEC_COMMITTING: 1992 ASSERT(buf->dtb_offset == 0); 1993 return (NULL); 1994 1995 case DTRACESPEC_ACTIVEONE: 1996 /* 1997 * This speculation is currently active on one CPU. 1998 * Check the offset in the buffer; if it's non-zero, 1999 * that CPU must be us (and we leave the state alone). 2000 * If it's zero, assume that we're starting on a new 2001 * CPU -- and change the state to indicate that the 2002 * speculation is active on more than one CPU. 2003 */ 2004 if (buf->dtb_offset != 0) 2005 return (buf); 2006 2007 new = DTRACESPEC_ACTIVEMANY; 2008 break; 2009 2010 case DTRACESPEC_ACTIVEMANY: 2011 return (buf); 2012 2013 case DTRACESPEC_ACTIVE: 2014 new = DTRACESPEC_ACTIVEONE; 2015 break; 2016 2017 default: 2018 ASSERT(0); 2019 } 2020 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2021 current, new) != current); 2022 2023 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2024 return (buf); 2025 } 2026 2027 /* 2028 * This function implements the DIF emulator's variable lookups. The emulator 2029 * passes a reserved variable identifier and optional built-in array index. 2030 */ 2031 static uint64_t 2032 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2033 uint64_t i) 2034 { 2035 /* 2036 * If we're accessing one of the uncached arguments, we'll turn this 2037 * into a reference in the args array. 2038 */ 2039 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2040 i = v - DIF_VAR_ARG0; 2041 v = DIF_VAR_ARGS; 2042 } 2043 2044 switch (v) { 2045 case DIF_VAR_ARGS: 2046 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2047 if (i >= sizeof (mstate->dtms_arg) / 2048 sizeof (mstate->dtms_arg[0])) { 2049 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2050 dtrace_provider_t *pv; 2051 uint64_t val; 2052 2053 pv = mstate->dtms_probe->dtpr_provider; 2054 if (pv->dtpv_pops.dtps_getargval != NULL) 2055 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2056 mstate->dtms_probe->dtpr_id, 2057 mstate->dtms_probe->dtpr_arg, i, aframes); 2058 else 2059 val = dtrace_getarg(i, aframes); 2060 2061 /* 2062 * This is regrettably required to keep the compiler 2063 * from tail-optimizing the call to dtrace_getarg(). 2064 * The condition always evaluates to true, but the 2065 * compiler has no way of figuring that out a priori. 2066 * (None of this would be necessary if the compiler 2067 * could be relied upon to _always_ tail-optimize 2068 * the call to dtrace_getarg() -- but it can't.) 
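 *
 * (Spelled out, the hazard is this: were the call tail-optimized,
 * dtrace_getarg() would be entered via a jump rather than a call,
 * leaving one fewer frame on the stack than the aframes value computed
 * above accounts for.)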
2069 */ 2070 if (mstate->dtms_probe != NULL) 2071 return (val); 2072 2073 ASSERT(0); 2074 } 2075 2076 return (mstate->dtms_arg[i]); 2077 2078 case DIF_VAR_UREGS: { 2079 klwp_t *lwp; 2080 2081 if (!dtrace_priv_proc(state)) 2082 return (0); 2083 2084 if ((lwp = curthread->t_lwp) == NULL) { 2085 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2086 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2087 return (0); 2088 } 2089 2090 return (dtrace_getreg(lwp->lwp_regs, i)); 2091 } 2092 2093 case DIF_VAR_CURTHREAD: 2094 if (!dtrace_priv_kernel(state)) 2095 return (0); 2096 return ((uint64_t)(uintptr_t)curthread); 2097 2098 case DIF_VAR_TIMESTAMP: 2099 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2100 mstate->dtms_timestamp = dtrace_gethrtime(); 2101 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2102 } 2103 return (mstate->dtms_timestamp); 2104 2105 case DIF_VAR_VTIMESTAMP: 2106 ASSERT(dtrace_vtime_references != 0); 2107 return (curthread->t_dtrace_vtime); 2108 2109 case DIF_VAR_WALLTIMESTAMP: 2110 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2111 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2112 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2113 } 2114 return (mstate->dtms_walltimestamp); 2115 2116 case DIF_VAR_IPL: 2117 if (!dtrace_priv_kernel(state)) 2118 return (0); 2119 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2120 mstate->dtms_ipl = dtrace_getipl(); 2121 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2122 } 2123 return (mstate->dtms_ipl); 2124 2125 case DIF_VAR_EPID: 2126 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2127 return (mstate->dtms_epid); 2128 2129 case DIF_VAR_ID: 2130 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2131 return (mstate->dtms_probe->dtpr_id); 2132 2133 case DIF_VAR_STACKDEPTH: 2134 if (!dtrace_priv_kernel(state)) 2135 return (0); 2136 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2137 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2138 2139 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2140 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2141 } 2142 return (mstate->dtms_stackdepth); 2143 2144 case DIF_VAR_USTACKDEPTH: 2145 if (!dtrace_priv_proc(state)) 2146 return (0); 2147 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2148 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2149 mstate->dtms_ustackdepth = dtrace_getustackdepth(); 2150 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2151 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2152 } 2153 return (mstate->dtms_ustackdepth); 2154 2155 case DIF_VAR_CALLER: 2156 if (!dtrace_priv_kernel(state)) 2157 return (0); 2158 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2159 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2160 2161 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2162 /* 2163 * If this is an unanchored probe, we are 2164 * required to go through the slow path: 2165 * dtrace_caller() only guarantees correct 2166 * results for anchored probes. 2167 */ 2168 pc_t caller[2]; 2169 2170 dtrace_getpcstack(caller, 2, aframes, 2171 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2172 mstate->dtms_caller = caller[1]; 2173 } else if ((mstate->dtms_caller = 2174 dtrace_caller(aframes)) == -1) { 2175 /* 2176 * We have failed to do this the quick way; 2177 * we must resort to the slower approach of 2178 * calling dtrace_getpcstack(). 
2179 */ 2180 pc_t caller; 2181 2182 dtrace_getpcstack(&caller, 1, aframes, NULL); 2183 mstate->dtms_caller = caller; 2184 } 2185 2186 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2187 } 2188 return (mstate->dtms_caller); 2189 2190 case DIF_VAR_PROBEPROV: 2191 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2192 return ((uint64_t)(uintptr_t) 2193 mstate->dtms_probe->dtpr_provider->dtpv_name); 2194 2195 case DIF_VAR_PROBEMOD: 2196 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2197 return ((uint64_t)(uintptr_t) 2198 mstate->dtms_probe->dtpr_mod); 2199 2200 case DIF_VAR_PROBEFUNC: 2201 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2202 return ((uint64_t)(uintptr_t) 2203 mstate->dtms_probe->dtpr_func); 2204 2205 case DIF_VAR_PROBENAME: 2206 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2207 return ((uint64_t)(uintptr_t) 2208 mstate->dtms_probe->dtpr_name); 2209 2210 case DIF_VAR_PID: 2211 if (!dtrace_priv_proc(state)) 2212 return (0); 2213 2214 /* 2215 * Note that we are assuming that an unanchored probe is 2216 * always due to a high-level interrupt. (And we're assuming 2217 * that there is only a single high level interrupt.) 2218 */ 2219 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2220 return (pid0.pid_id); 2221 2222 /* 2223 * It is always safe to dereference one's own t_procp pointer: 2224 * it always points to a valid, allocated proc structure. 2225 * Further, it is always safe to dereference the p_pidp member 2226 * of one's own proc structure. (These are truisms because 2227 * threads and processes don't clean up their own state -- 2228 * they leave that task to whomever reaps them.) 2229 */ 2230 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2231 2232 case DIF_VAR_TID: 2233 /* 2234 * See comment in DIF_VAR_PID. 2235 */ 2236 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2237 return (0); 2238 2239 return ((uint64_t)curthread->t_tid); 2240 2241 case DIF_VAR_EXECNAME: 2242 if (!dtrace_priv_proc(state)) 2243 return (0); 2244 2245 /* 2246 * See comment in DIF_VAR_PID. 2247 */ 2248 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2249 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2250 2251 /* 2252 * It is always safe to dereference one's own t_procp pointer: 2253 * it always points to a valid, allocated proc structure. 2254 * (This is true because threads don't clean up their own 2255 * state -- they leave that task to whomever reaps them.) 2256 */ 2257 return ((uint64_t)(uintptr_t) 2258 curthread->t_procp->p_user.u_comm); 2259 2260 case DIF_VAR_ZONENAME: 2261 if (!dtrace_priv_proc(state)) 2262 return (0); 2263 2264 /* 2265 * See comment in DIF_VAR_PID. 2266 */ 2267 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2268 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2269 2270 /* 2271 * It is always safe to dereference one's own t_procp pointer: 2272 * it always points to a valid, allocated proc structure. 2273 * (This is true because threads don't clean up their own 2274 * state -- they leave that task to whomever reaps them.) 2275 */ 2276 return ((uint64_t)(uintptr_t) 2277 curthread->t_procp->p_zone->zone_name); 2278 2279 default: 2280 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2281 return (0); 2282 } 2283 } 2284 2285 /* 2286 * Emulate the execution of DTrace DIF subroutines invoked by the call opcode. 2287 * Notice that we don't bother validating the proper number of arguments or 2288 * their types in the tuple stack.
This isn't needed because all argument 2289 * interpretation is safe because of our load safety -- the worst that can 2290 * happen is that a bogus program can obtain bogus results. 2291 */ 2292 static void 2293 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2294 dtrace_key_t *tupregs, int nargs, 2295 dtrace_mstate_t *mstate, dtrace_state_t *state) 2296 { 2297 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2298 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2299 2300 union { 2301 mutex_impl_t mi; 2302 uint64_t mx; 2303 } m; 2304 2305 union { 2306 krwlock_t ri; 2307 uintptr_t rw; 2308 } r; 2309 2310 switch (subr) { 2311 case DIF_SUBR_RAND: 2312 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2313 break; 2314 2315 case DIF_SUBR_MUTEX_OWNED: 2316 m.mx = dtrace_load64(tupregs[0].dttk_value); 2317 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2318 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2319 else 2320 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2321 break; 2322 2323 case DIF_SUBR_MUTEX_OWNER: 2324 m.mx = dtrace_load64(tupregs[0].dttk_value); 2325 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2326 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2327 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2328 else 2329 regs[rd] = 0; 2330 break; 2331 2332 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2333 m.mx = dtrace_load64(tupregs[0].dttk_value); 2334 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2335 break; 2336 2337 case DIF_SUBR_MUTEX_TYPE_SPIN: 2338 m.mx = dtrace_load64(tupregs[0].dttk_value); 2339 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2340 break; 2341 2342 case DIF_SUBR_RW_READ_HELD: { 2343 uintptr_t tmp; 2344 2345 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2346 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2347 break; 2348 } 2349 2350 case DIF_SUBR_RW_WRITE_HELD: 2351 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2352 regs[rd] = _RW_WRITE_HELD(&r.ri); 2353 break; 2354 2355 case DIF_SUBR_RW_ISWRITER: 2356 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2357 regs[rd] = _RW_ISWRITER(&r.ri); 2358 break; 2359 2360 case DIF_SUBR_BCOPY: { 2361 /* 2362 * We need to be sure that the destination is in the scratch 2363 * region -- no other region is allowed. 2364 */ 2365 uintptr_t src = tupregs[0].dttk_value; 2366 uintptr_t dest = tupregs[1].dttk_value; 2367 size_t size = tupregs[2].dttk_value; 2368 2369 if (!dtrace_inscratch(dest, size, mstate)) { 2370 *flags |= CPU_DTRACE_BADADDR; 2371 *illval = regs[rd]; 2372 break; 2373 } 2374 2375 dtrace_bcopy((void *)src, (void *)dest, size); 2376 break; 2377 } 2378 2379 case DIF_SUBR_ALLOCA: 2380 case DIF_SUBR_COPYIN: { 2381 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2382 uint64_t size = 2383 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2384 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2385 2386 /* 2387 * This action doesn't require any credential checks since 2388 * probes will not activate in user contexts to which the 2389 * enabling user does not have permissions. 
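 *
 * (To illustrate the arithmetic just above with a hypothetical value:
 * were dtms_scratch_ptr 0x1004, P2ROUNDUP() would yield a dest of
 * 0x1008, and scratch_size would be size plus the four bytes of
 * alignment slop -- advancing the scratch pointer past both the
 * padding and the allocation itself.)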
2390 */ 2391 if (mstate->dtms_scratch_ptr + scratch_size > 2392 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2393 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2394 regs[rd] = NULL; 2395 break; 2396 } 2397 2398 if (subr == DIF_SUBR_COPYIN) { 2399 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2400 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2401 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2402 } 2403 2404 mstate->dtms_scratch_ptr += scratch_size; 2405 regs[rd] = dest; 2406 break; 2407 } 2408 2409 case DIF_SUBR_COPYINTO: { 2410 uint64_t size = tupregs[1].dttk_value; 2411 uintptr_t dest = tupregs[2].dttk_value; 2412 2413 /* 2414 * This action doesn't require any credential checks since 2415 * probes will not activate in user contexts to which the 2416 * enabling user does not have permissions. 2417 */ 2418 if (!dtrace_inscratch(dest, size, mstate)) { 2419 *flags |= CPU_DTRACE_BADADDR; 2420 *illval = regs[rd]; 2421 break; 2422 } 2423 2424 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2425 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2426 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2427 break; 2428 } 2429 2430 case DIF_SUBR_COPYINSTR: { 2431 uintptr_t dest = mstate->dtms_scratch_ptr; 2432 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2433 2434 if (nargs > 1 && tupregs[1].dttk_value < size) 2435 size = tupregs[1].dttk_value + 1; 2436 2437 /* 2438 * This action doesn't require any credential checks since 2439 * probes will not activate in user contexts to which the 2440 * enabling user does not have permissions. 2441 */ 2442 if (mstate->dtms_scratch_ptr + size > 2443 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2444 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2445 regs[rd] = NULL; 2446 break; 2447 } 2448 2449 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2450 dtrace_copyinstr(tupregs[0].dttk_value, dest, size); 2451 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2452 2453 ((char *)dest)[size - 1] = '\0'; 2454 mstate->dtms_scratch_ptr += size; 2455 regs[rd] = dest; 2456 break; 2457 } 2458 2459 case DIF_SUBR_MSGSIZE: 2460 case DIF_SUBR_MSGDSIZE: { 2461 uintptr_t baddr = tupregs[0].dttk_value, daddr; 2462 uintptr_t wptr, rptr; 2463 size_t count = 0; 2464 int cont = 0; 2465 2466 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2467 wptr = dtrace_loadptr(baddr + 2468 offsetof(mblk_t, b_wptr)); 2469 2470 rptr = dtrace_loadptr(baddr + 2471 offsetof(mblk_t, b_rptr)); 2472 2473 if (wptr < rptr) { 2474 *flags |= CPU_DTRACE_BADADDR; 2475 *illval = tupregs[0].dttk_value; 2476 break; 2477 } 2478 2479 daddr = dtrace_loadptr(baddr + 2480 offsetof(mblk_t, b_datap)); 2481 2482 baddr = dtrace_loadptr(baddr + 2483 offsetof(mblk_t, b_cont)); 2484 2485 /* 2486 * We want to protect against denial-of-service here, 2487 * so we're only going to search the list for 2488 * dtrace_msgdsize_max mblks.
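 *
 * (As a hypothetical example of the semantics at work here: given a
 * three-mblk chain of an M_PROTO mblk holding 24 bytes and two M_DATA
 * mblks holding 100 and 60 bytes, msgsize() computes 184, while
 * msgdsize() -- which skips the non-M_DATA mblk below -- computes 160.)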
2489 */ 2490 if (cont++ > dtrace_msgdsize_max) { 2491 *flags |= CPU_DTRACE_ILLOP; 2492 break; 2493 } 2494 2495 if (subr == DIF_SUBR_MSGDSIZE) { 2496 if (dtrace_load8(daddr + 2497 offsetof(dblk_t, db_type)) != M_DATA) 2498 continue; 2499 } 2500 2501 count += wptr - rptr; 2502 } 2503 2504 if (!(*flags & CPU_DTRACE_FAULT)) 2505 regs[rd] = count; 2506 2507 break; 2508 } 2509 2510 case DIF_SUBR_PROGENYOF: { 2511 pid_t pid = tupregs[0].dttk_value; 2512 proc_t *p; 2513 int rval = 0; 2514 2515 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2516 2517 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 2518 if (p->p_pidp->pid_id == pid) { 2519 rval = 1; 2520 break; 2521 } 2522 } 2523 2524 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2525 2526 regs[rd] = rval; 2527 break; 2528 } 2529 2530 case DIF_SUBR_SPECULATION: 2531 regs[rd] = dtrace_speculation(state); 2532 break; 2533 2534 case DIF_SUBR_COPYOUT: { 2535 uintptr_t kaddr = tupregs[0].dttk_value; 2536 uintptr_t uaddr = tupregs[1].dttk_value; 2537 uint64_t size = tupregs[2].dttk_value; 2538 2539 if (!dtrace_destructive_disallow && 2540 dtrace_priv_proc_control(state) && 2541 !dtrace_istoxic(kaddr, size)) { 2542 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2543 dtrace_copyout(kaddr, uaddr, size); 2544 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2545 } 2546 break; 2547 } 2548 2549 case DIF_SUBR_COPYOUTSTR: { 2550 uintptr_t kaddr = tupregs[0].dttk_value; 2551 uintptr_t uaddr = tupregs[1].dttk_value; 2552 uint64_t size = tupregs[2].dttk_value; 2553 2554 if (!dtrace_destructive_disallow && 2555 dtrace_priv_proc_control(state) && 2556 !dtrace_istoxic(kaddr, size)) { 2557 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2558 dtrace_copyoutstr(kaddr, uaddr, size); 2559 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2560 } 2561 break; 2562 } 2563 2564 case DIF_SUBR_STRLEN: 2565 regs[rd] = dtrace_strlen((char *)(uintptr_t) 2566 tupregs[0].dttk_value, 2567 state->dts_options[DTRACEOPT_STRSIZE]); 2568 break; 2569 2570 case DIF_SUBR_STRCHR: 2571 case DIF_SUBR_STRRCHR: { 2572 /* 2573 * We're going to iterate over the string looking for the 2574 * specified character. We will iterate until we have reached 2575 * the string length or we have found the character. If this 2576 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 2577 * of the specified character instead of the first. 2578 */ 2579 uintptr_t addr = tupregs[0].dttk_value; 2580 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 2581 char c, target = (char)tupregs[1].dttk_value; 2582 2583 for (regs[rd] = NULL; addr < limit; addr++) { 2584 if ((c = dtrace_load8(addr)) == target) { 2585 regs[rd] = addr; 2586 2587 if (subr == DIF_SUBR_STRCHR) 2588 break; 2589 } 2590 2591 if (c == '\0') 2592 break; 2593 } 2594 2595 break; 2596 } 2597 2598 case DIF_SUBR_STRSTR: 2599 case DIF_SUBR_INDEX: 2600 case DIF_SUBR_RINDEX: { 2601 /* 2602 * We're going to iterate over the string looking for the 2603 * specified string. We will iterate until we have reached 2604 * the string length or we have found the string. (Yes, this 2605 * is done in the most naive way possible -- but considering 2606 * that the string we're searching for is likely to be 2607 * relatively short, the complexity of Rabin-Karp or similar 2608 * hardly seems merited.) 
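 *
 * (Concretely, as hypothetical examples of the semantics implemented
 * below: index("dtrace", "race") evaluates to 2, rindex("aabbaa", "aa")
 * evaluates to 4, and strstr("dtrace", "race") evaluates to a pointer
 * to the trailing four characters of the string.)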
2609 */ 2610 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 2611 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 2612 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2613 size_t len = dtrace_strlen(addr, size); 2614 size_t sublen = dtrace_strlen(substr, size); 2615 char *limit = addr + len, *orig = addr; 2616 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 2617 int inc = 1; 2618 2619 regs[rd] = notfound; 2620 2621 /* 2622 * strstr() and index()/rindex() have similar semantics if 2623 * both strings are the empty string: strstr() returns a 2624 * pointer to the (empty) string, and index() and rindex() 2625 * both return index 0 (regardless of any position argument). 2626 */ 2627 if (sublen == 0 && len == 0) { 2628 if (subr == DIF_SUBR_STRSTR) 2629 regs[rd] = (uintptr_t)addr; 2630 else 2631 regs[rd] = 0; 2632 break; 2633 } 2634 2635 if (subr != DIF_SUBR_STRSTR) { 2636 if (subr == DIF_SUBR_RINDEX) { 2637 limit = orig - 1; 2638 addr += len; 2639 inc = -1; 2640 } 2641 2642 /* 2643 * Both index() and rindex() take an optional position 2644 * argument that denotes the starting position. 2645 */ 2646 if (nargs == 3) { 2647 int64_t pos = (int64_t)tupregs[2].dttk_value; 2648 2649 /* 2650 * If the position argument to index() is 2651 * negative, Perl implicitly clamps it at 2652 * zero. This semantic is a little surprising 2653 * given the special meaning of negative 2654 * positions to similar Perl functions like 2655 * substr(), but it appears to reflect a 2656 * notion that index() can start from a 2657 * negative index and increment its way up to 2658 * the string. Given this notion, Perl's 2659 * rindex() is at least self-consistent in 2660 * that it implicitly clamps positions greater 2661 * than the string length to be the string 2662 * length. Where Perl completely loses 2663 * coherence, however, is when the specified 2664 * substring is the empty string (""). In 2665 * this case, even if the position is 2666 * negative, rindex() returns 0 -- and even if 2667 * the position is greater than the length, 2668 * index() returns the string length. These 2669 * semantics violate the notion that index() 2670 * should never return a value less than the 2671 * specified position and that rindex() should 2672 * never return a value greater than the 2673 * specified position. (One assumes that 2674 * these semantics are artifacts of Perl's 2675 * implementation and not the results of 2676 * deliberate design -- it beggars belief that 2677 * even Larry Wall could desire such oddness.) 2678 * While in the abstract one would wish for 2679 * consistent position semantics across 2680 * substr(), index() and rindex() -- or at the 2681 * very least self-consistent position 2682 * semantics for index() and rindex() -- we 2683 * instead opt to keep with the extant Perl 2684 * semantics, in all their broken glory. (Do 2685 * we have more desire to maintain Perl's 2686 * semantics than Perl does? Probably.)
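 *
 * (To make the oddity concrete, hypothetically: rindex("foo", "", -2)
 * evaluates to 0 despite the negative position, and index("foo", "",
 * 10) evaluates to 3 -- the string length -- despite the excessive
 * position; both match Perl, and both are implemented by the
 * empty-substring checks below.)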
2687 */ 2688 if (subr == DIF_SUBR_RINDEX) { 2689 if (pos < 0) { 2690 if (sublen == 0) 2691 regs[rd] = 0; 2692 break; 2693 } 2694 2695 if (pos > len) 2696 pos = len; 2697 } else { 2698 if (pos < 0) 2699 pos = 0; 2700 2701 if (pos >= len) { 2702 if (sublen == 0) 2703 regs[rd] = len; 2704 break; 2705 } 2706 } 2707 2708 addr = orig + pos; 2709 } 2710 } 2711 2712 for (regs[rd] = notfound; addr != limit; addr += inc) { 2713 if (dtrace_strncmp(addr, substr, sublen) == 0) { 2714 if (subr != DIF_SUBR_STRSTR) { 2715 /* 2716 * As D index() and rindex() are 2717 * modeled on Perl (and not on awk), 2718 * we return a zero-based (and not a 2719 * one-based) index. (For you Perl 2720 * weenies: no, we're not going to add 2721 * $[ -- and shouldn't you be at a con 2722 * or something?) 2723 */ 2724 regs[rd] = (uintptr_t)(addr - orig); 2725 break; 2726 } 2727 2728 ASSERT(subr == DIF_SUBR_STRSTR); 2729 regs[rd] = (uintptr_t)addr; 2730 break; 2731 } 2732 } 2733 2734 break; 2735 } 2736 2737 case DIF_SUBR_STRTOK: { 2738 uintptr_t addr = tupregs[0].dttk_value; 2739 uintptr_t tokaddr = tupregs[1].dttk_value; 2740 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2741 uintptr_t limit, toklimit = tokaddr + size; 2742 uint8_t c, tokmap[32]; /* 256 / 8 */ 2743 char *dest = (char *)mstate->dtms_scratch_ptr; 2744 int i; 2745 2746 if (mstate->dtms_scratch_ptr + size > 2747 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2748 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2749 regs[rd] = NULL; 2750 break; 2751 } 2752 2753 if (addr == NULL) { 2754 /* 2755 * If the address specified is NULL, we use our saved 2756 * strtok pointer from the mstate. Note that this 2757 * means that the saved strtok pointer is _only_ 2758 * valid within multiple enablings of the same probe -- 2759 * it behaves like an implicit clause-local variable. 2760 */ 2761 addr = mstate->dtms_strtok; 2762 } 2763 2764 /* 2765 * First, zero the token map, and then process the token 2766 * string -- setting a bit in the map for every character 2767 * found in the token string. 2768 */ 2769 for (i = 0; i < sizeof (tokmap); i++) 2770 tokmap[i] = 0; 2771 2772 for (; tokaddr < toklimit; tokaddr++) { 2773 if ((c = dtrace_load8(tokaddr)) == '\0') 2774 break; 2775 2776 ASSERT((c >> 3) < sizeof (tokmap)); 2777 tokmap[c >> 3] |= (1 << (c & 0x7)); 2778 } 2779 2780 for (limit = addr + size; addr < limit; addr++) { 2781 /* 2782 * We're looking for a character that is _not_ contained 2783 * in the token string. 2784 */ 2785 if ((c = dtrace_load8(addr)) == '\0') 2786 break; 2787 2788 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 2789 break; 2790 } 2791 2792 if (c == '\0') { 2793 /* 2794 * We reached the end of the string without finding 2795 * any character that was not in the token string. 2796 * We return NULL in this case, and we set the saved 2797 * address to NULL as well. 2798 */ 2799 regs[rd] = NULL; 2800 mstate->dtms_strtok = NULL; 2801 break; 2802 } 2803 2804 /* 2805 * From here on, we're copying into the destination string. 
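 *
 * (The token map consulted below is a 256-bit bitmap; a delimiter
 * string of ":", for example, set bit two of tokmap[7] above, ':'
 * being 0x3a -- 0x3a >> 3 == 7 and 0x3a & 0x7 == 2.)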
2806 */ 2807 for (i = 0; addr < limit && i < size - 1; addr++) { 2808 if ((c = dtrace_load8(addr)) == '\0') 2809 break; 2810 2811 if (tokmap[c >> 3] & (1 << (c & 0x7))) 2812 break; 2813 2814 ASSERT(i < size); 2815 dest[i++] = c; 2816 } 2817 2818 ASSERT(i < size); 2819 dest[i] = '\0'; 2820 regs[rd] = (uintptr_t)dest; 2821 mstate->dtms_scratch_ptr += size; 2822 mstate->dtms_strtok = addr; 2823 break; 2824 } 2825 2826 case DIF_SUBR_SUBSTR: { 2827 uintptr_t s = tupregs[0].dttk_value; 2828 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2829 char *d = (char *)mstate->dtms_scratch_ptr; 2830 int64_t index = (int64_t)tupregs[1].dttk_value; 2831 int64_t remaining = (int64_t)tupregs[2].dttk_value; 2832 size_t len = dtrace_strlen((char *)s, size); 2833 int64_t i = 0; 2834 2835 if (nargs <= 2) 2836 remaining = (int64_t)size; 2837 2838 if (mstate->dtms_scratch_ptr + size > 2839 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2840 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2841 regs[rd] = NULL; 2842 break; 2843 } 2844 2845 if (index < 0) { 2846 index += len; 2847 2848 if (index < 0 && index + remaining > 0) { 2849 remaining += index; 2850 index = 0; 2851 } 2852 } 2853 2854 if (index >= len || index < 0) 2855 index = len; 2856 2857 for (d[0] = '\0'; remaining > 0; remaining--) { 2858 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 2859 break; 2860 2861 if (i == size) { 2862 d[i - 1] = '\0'; 2863 break; 2864 } 2865 } 2866 2867 mstate->dtms_scratch_ptr += size; 2868 regs[rd] = (uintptr_t)d; 2869 break; 2870 } 2871 2872 case DIF_SUBR_GETMAJOR: 2873 #ifdef _LP64 2874 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 2875 #else 2876 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 2877 #endif 2878 break; 2879 2880 case DIF_SUBR_GETMINOR: 2881 #ifdef _LP64 2882 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 2883 #else 2884 regs[rd] = tupregs[0].dttk_value & MAXMIN; 2885 #endif 2886 break; 2887 2888 case DIF_SUBR_DDI_PATHNAME: { 2889 /* 2890 * This one is a galactic mess. We are going to roughly 2891 * emulate ddi_pathname(), but it's made more complicated 2892 * by the fact that we (a) want to include the minor name and 2893 * (b) must proceed iteratively instead of recursively. 2894 */ 2895 uintptr_t dest = mstate->dtms_scratch_ptr; 2896 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2897 char *start = (char *)dest, *end = start + size - 1; 2898 uintptr_t daddr = tupregs[0].dttk_value; 2899 int64_t minor = (int64_t)tupregs[1].dttk_value; 2900 char *s; 2901 int i, len, depth = 0; 2902 2903 if (size == 0 || mstate->dtms_scratch_ptr + size > 2904 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2905 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2906 regs[rd] = NULL; 2907 break; 2908 } 2909 2910 *end = '\0'; 2911 2912 /* 2913 * We want to have a name for the minor. In order to do this, 2914 * we need to walk the minor list from the devinfo. We want 2915 * to be sure that we don't infinitely walk a circular list, 2916 * so we check for circularity by sending a scout pointer 2917 * ahead two elements for every element that we iterate over; 2918 * if the list is circular, these will ultimately point to the 2919 * same element. You may recognize this little trick as the 2920 * answer to a stupid interview question -- one that always 2921 * seems to be asked by those who had to have it laboriously 2922 * explained to them, and who can't even concisely describe 2923 * the conditions under which one would be forced to resort to 2924 * this technique. 
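 *
 * (The trick, for the record, is the classic tortoise-and-hare cycle
 * detection: the scout advances two links for every link that we
 * traverse, so if the minor list is circular, the scout must
 * eventually be found equal to our own pointer.)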
Needless to say, those conditions are 2925 * found here -- and probably only here. Is this the only 2926 * use of this infamous trick in shipping, production code? 2927 * If it isn't, it probably should be... 2928 */ 2929 if (minor != -1) { 2930 uintptr_t maddr = dtrace_loadptr(daddr + 2931 offsetof(struct dev_info, devi_minor)); 2932 2933 uintptr_t next = offsetof(struct ddi_minor_data, next); 2934 uintptr_t name = offsetof(struct ddi_minor_data, 2935 d_minor) + offsetof(struct ddi_minor, name); 2936 uintptr_t dev = offsetof(struct ddi_minor_data, 2937 d_minor) + offsetof(struct ddi_minor, dev); 2938 uintptr_t scout; 2939 2940 if (maddr != NULL) 2941 scout = dtrace_loadptr(maddr + next); 2942 2943 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2944 uint64_t m; 2945 #ifdef _LP64 2946 m = dtrace_load64(maddr + dev) & MAXMIN64; 2947 #else 2948 m = dtrace_load32(maddr + dev) & MAXMIN; 2949 #endif 2950 if (m != minor) { 2951 maddr = dtrace_loadptr(maddr + next); 2952 2953 if (scout == NULL) 2954 continue; 2955 2956 scout = dtrace_loadptr(scout + next); 2957 2958 if (scout == NULL) 2959 continue; 2960 2961 scout = dtrace_loadptr(scout + next); 2962 2963 if (scout == NULL) 2964 continue; 2965 2966 if (scout == maddr) { 2967 *flags |= CPU_DTRACE_ILLOP; 2968 break; 2969 } 2970 2971 continue; 2972 } 2973 2974 /* 2975 * We have the minor data. Now we need to 2976 * copy the minor's name into the end of the 2977 * pathname. 2978 */ 2979 s = (char *)dtrace_loadptr(maddr + name); 2980 len = dtrace_strlen(s, size); 2981 2982 if (*flags & CPU_DTRACE_FAULT) 2983 break; 2984 2985 if (len != 0) { 2986 if ((end -= (len + 1)) < start) 2987 break; 2988 2989 *end = ':'; 2990 } 2991 2992 for (i = 1; i <= len; i++) 2993 end[i] = dtrace_load8((uintptr_t)s++); 2994 break; 2995 } 2996 } 2997 2998 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2999 ddi_node_state_t devi_state; 3000 3001 devi_state = dtrace_load32(daddr + 3002 offsetof(struct dev_info, devi_node_state)); 3003 3004 if (*flags & CPU_DTRACE_FAULT) 3005 break; 3006 3007 if (devi_state >= DS_INITIALIZED) { 3008 s = (char *)dtrace_loadptr(daddr + 3009 offsetof(struct dev_info, devi_addr)); 3010 len = dtrace_strlen(s, size); 3011 3012 if (*flags & CPU_DTRACE_FAULT) 3013 break; 3014 3015 if (len != 0) { 3016 if ((end -= (len + 1)) < start) 3017 break; 3018 3019 *end = '@'; 3020 } 3021 3022 for (i = 1; i <= len; i++) 3023 end[i] = dtrace_load8((uintptr_t)s++); 3024 } 3025 3026 /* 3027 * Now for the node name... 3028 */ 3029 s = (char *)dtrace_loadptr(daddr + 3030 offsetof(struct dev_info, devi_node_name)); 3031 3032 daddr = dtrace_loadptr(daddr + 3033 offsetof(struct dev_info, devi_parent)); 3034 3035 /* 3036 * If our parent is NULL (that is, if we're the root 3037 * node), we're going to use the special path 3038 * "devices".
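 *
 * (The net effect is a pathname assembled backwards from the leaf; for
 * the dtrace pseudo-device itself, for instance, one would expect a
 * result of the hypothetical form "/devices/pseudo/dtrace@0:dtrace".)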
3039 */ 3040 if (daddr == NULL) 3041 s = "devices"; 3042 3043 len = dtrace_strlen(s, size); 3044 if (*flags & CPU_DTRACE_FAULT) 3045 break; 3046 3047 if ((end -= (len + 1)) < start) 3048 break; 3049 3050 for (i = 1; i <= len; i++) 3051 end[i] = dtrace_load8((uintptr_t)s++); 3052 *end = '/'; 3053 3054 if (depth++ > dtrace_devdepth_max) { 3055 *flags |= CPU_DTRACE_ILLOP; 3056 break; 3057 } 3058 } 3059 3060 if (end < start) 3061 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3062 3063 if (daddr == NULL) { 3064 regs[rd] = (uintptr_t)end; 3065 mstate->dtms_scratch_ptr += size; 3066 } 3067 3068 break; 3069 } 3070 3071 case DIF_SUBR_STRJOIN: { 3072 char *d = (char *)mstate->dtms_scratch_ptr; 3073 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3074 uintptr_t s1 = tupregs[0].dttk_value; 3075 uintptr_t s2 = tupregs[1].dttk_value; 3076 int i = 0; 3077 3078 if (mstate->dtms_scratch_ptr + size > 3079 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3080 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3081 regs[rd] = NULL; 3082 break; 3083 } 3084 3085 for (;;) { 3086 if (i >= size) { 3087 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3088 regs[rd] = NULL; 3089 break; 3090 } 3091 3092 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3093 i--; 3094 break; 3095 } 3096 } 3097 3098 for (;;) { 3099 if (i >= size) { 3100 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3101 regs[rd] = NULL; 3102 break; 3103 } 3104 3105 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3106 break; 3107 } 3108 3109 if (i < size) { 3110 mstate->dtms_scratch_ptr += i; 3111 regs[rd] = (uintptr_t)d; 3112 } 3113 3114 break; 3115 } 3116 3117 case DIF_SUBR_LLTOSTR: { 3118 int64_t i = (int64_t)tupregs[0].dttk_value; 3119 int64_t val = i < 0 ? i * -1 : i; 3120 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3121 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3122 3123 if (mstate->dtms_scratch_ptr + size > 3124 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3125 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3126 regs[rd] = NULL; 3127 break; 3128 } 3129 3130 for (*end-- = '\0'; val; val /= 10) 3131 *end-- = '0' + (val % 10); 3132 3133 if (i == 0) 3134 *end-- = '0'; 3135 3136 if (i < 0) 3137 *end-- = '-'; 3138 3139 regs[rd] = (uintptr_t)end + 1; 3140 mstate->dtms_scratch_ptr += size; 3141 break; 3142 } 3143 3144 case DIF_SUBR_DIRNAME: 3145 case DIF_SUBR_BASENAME: { 3146 char *dest = (char *)mstate->dtms_scratch_ptr; 3147 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3148 uintptr_t src = tupregs[0].dttk_value; 3149 int i, j, len = dtrace_strlen((char *)src, size); 3150 int lastbase = -1, firstbase = -1, lastdir = -1; 3151 int start, end; 3152 3153 if (mstate->dtms_scratch_ptr + size > 3154 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3155 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3156 regs[rd] = NULL; 3157 break; 3158 } 3159 3160 /* 3161 * The basename and dirname for a zero-length string is 3162 * defined to be "." 3163 */ 3164 if (len == 0) { 3165 len = 1; 3166 src = (uintptr_t)"."; 3167 } 3168 3169 /* 3170 * Start from the back of the string, moving back toward the 3171 * front until we see a character that isn't a slash. That 3172 * character is the last character in the basename. 3173 */ 3174 for (i = len - 1; i >= 0; i--) { 3175 if (dtrace_load8(src + i) != '/') 3176 break; 3177 } 3178 3179 if (i >= 0) 3180 lastbase = i; 3181 3182 /* 3183 * Starting from the last character in the basename, move 3184 * towards the front until we find a slash. 
The character 3185 * that we processed immediately before that is the first 3186 * character in the basename. 3187 */ 3188 for (; i >= 0; i--) { 3189 if (dtrace_load8(src + i) == '/') 3190 break; 3191 } 3192 3193 if (i >= 0) 3194 firstbase = i + 1; 3195 3196 /* 3197 * Now keep going until we find a non-slash character. That 3198 * character is the last character in the dirname. 3199 */ 3200 for (; i >= 0; i--) { 3201 if (dtrace_load8(src + i) != '/') 3202 break; 3203 } 3204 3205 if (i >= 0) 3206 lastdir = i; 3207 3208 ASSERT(!(lastbase == -1 && firstbase != -1)); 3209 ASSERT(!(firstbase == -1 && lastdir != -1)); 3210 3211 if (lastbase == -1) { 3212 /* 3213 * We didn't find a non-slash character. We know that 3214 * the length is non-zero, so the whole string must be 3215 * slashes. In either the dirname or the basename 3216 * case, we return '/'. 3217 */ 3218 ASSERT(firstbase == -1); 3219 firstbase = lastbase = lastdir = 0; 3220 } 3221 3222 if (firstbase == -1) { 3223 /* 3224 * The entire string consists only of a basename 3225 * component. If we're looking for dirname, we need 3226 * to change our string to be just "."; if we're 3227 * looking for a basename, we'll just set the first 3228 * character of the basename to be 0. 3229 */ 3230 if (subr == DIF_SUBR_DIRNAME) { 3231 ASSERT(lastdir == -1); 3232 src = (uintptr_t)"."; 3233 lastdir = 0; 3234 } else { 3235 firstbase = 0; 3236 } 3237 } 3238 3239 if (subr == DIF_SUBR_DIRNAME) { 3240 if (lastdir == -1) { 3241 /* 3242 * We know that we have a slash in the name -- 3243 * or lastdir would be set to 0, above. And 3244 * because lastdir is -1, we know that this 3245 * slash must be the first character. (That 3246 * is, the full string must be of the form 3247 * "/basename".) In this case, the last 3248 * character of the directory name is 0. 3249 */ 3250 lastdir = 0; 3251 } 3252 3253 start = 0; 3254 end = lastdir; 3255 } else { 3256 ASSERT(subr == DIF_SUBR_BASENAME); 3257 ASSERT(firstbase != -1 && lastbase != -1); 3258 start = firstbase; 3259 end = lastbase; 3260 } 3261 3262 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3263 dest[j] = dtrace_load8(src + i); 3264 3265 dest[j] = '\0'; 3266 regs[rd] = (uintptr_t)dest; 3267 mstate->dtms_scratch_ptr += size; 3268 break; 3269 } 3270 3271 case DIF_SUBR_CLEANPATH: { 3272 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3273 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3274 uintptr_t src = tupregs[0].dttk_value; 3275 int i = 0, j = 0; 3276 3277 if (mstate->dtms_scratch_ptr + size > 3278 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3279 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3280 regs[rd] = NULL; 3281 break; 3282 } 3283 3284 /* 3285 * Move forward, loading each character. 3286 */ 3287 do { 3288 c = dtrace_load8(src + i++); 3289 next: 3290 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3291 break; 3292 3293 if (c != '/') { 3294 dest[j++] = c; 3295 continue; 3296 } 3297 3298 c = dtrace_load8(src + i++); 3299 3300 if (c == '/') { 3301 /* 3302 * We have two slashes -- we can just advance 3303 * to the next character. 3304 */ 3305 goto next; 3306 } 3307 3308 if (c != '.') { 3309 /* 3310 * This is not "." and it's not ".." -- we can 3311 * just store the "/" and this character and 3312 * drive on. 3313 */ 3314 dest[j++] = '/'; 3315 dest[j++] = c; 3316 continue; 3317 } 3318 3319 c = dtrace_load8(src + i++); 3320 3321 if (c == '/') { 3322 /* 3323 * This is a "/./" component. 
We're not going 3324 * to store anything in the destination buffer; 3325 * we're just going to go to the next component. 3326 */ 3327 goto next; 3328 } 3329 3330 if (c != '.') { 3331 /* 3332 * This is not ".." -- we can just store the 3333 * "/." and this character and continue 3334 * processing. 3335 */ 3336 dest[j++] = '/'; 3337 dest[j++] = '.'; 3338 dest[j++] = c; 3339 continue; 3340 } 3341 3342 c = dtrace_load8(src + i++); 3343 3344 if (c != '/' && c != '\0') { 3345 /* 3346 * This is not ".." -- it's "..[mumble]". 3347 * We'll store the "/.." and this character 3348 * and continue processing. 3349 */ 3350 dest[j++] = '/'; 3351 dest[j++] = '.'; 3352 dest[j++] = '.'; 3353 dest[j++] = c; 3354 continue; 3355 } 3356 3357 /* 3358 * This is "/../" or "/..\0". We need to back up 3359 * our destination pointer until we find a "/". 3360 */ 3361 i--; 3362 while (j != 0 && dest[--j] != '/') 3363 continue; 3364 3365 if (c == '\0') 3366 dest[++j] = '/'; 3367 } while (c != '\0'); 3368 3369 dest[j] = '\0'; 3370 regs[rd] = (uintptr_t)dest; 3371 mstate->dtms_scratch_ptr += size; 3372 break; 3373 } 3374 } 3375 } 3376 3377 /* 3378 * Emulate the execution of DTrace IR instructions specified by the given 3379 * DIF object. This function is deliberately void of assertions as all of 3380 * the necessary checks are handled by a call to dtrace_difo_validate(). 3381 */ 3382 static uint64_t 3383 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 3384 dtrace_vstate_t *vstate, dtrace_state_t *state) 3385 { 3386 const dif_instr_t *text = difo->dtdo_buf; 3387 const uint_t textlen = difo->dtdo_len; 3388 const char *strtab = difo->dtdo_strtab; 3389 const uint64_t *inttab = difo->dtdo_inttab; 3390 3391 uint64_t rval = 0; 3392 dtrace_statvar_t *svar; 3393 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 3394 dtrace_difv_t *v; 3395 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3396 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3397 3398 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 3399 uint64_t regs[DIF_DIR_NREGS]; 3400 uint64_t *tmp; 3401 3402 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 3403 int64_t cc_r; 3404 uint_t pc = 0, id, opc; 3405 uint8_t ttop = 0; 3406 dif_instr_t instr; 3407 uint_t r1, r2, rd; 3408 3409 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 3410 3411 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 3412 opc = pc; 3413 3414 instr = text[pc++]; 3415 r1 = DIF_INSTR_R1(instr); 3416 r2 = DIF_INSTR_R2(instr); 3417 rd = DIF_INSTR_RD(instr); 3418 3419 switch (DIF_INSTR_OP(instr)) { 3420 case DIF_OP_OR: 3421 regs[rd] = regs[r1] | regs[r2]; 3422 break; 3423 case DIF_OP_XOR: 3424 regs[rd] = regs[r1] ^ regs[r2]; 3425 break; 3426 case DIF_OP_AND: 3427 regs[rd] = regs[r1] & regs[r2]; 3428 break; 3429 case DIF_OP_SLL: 3430 regs[rd] = regs[r1] << regs[r2]; 3431 break; 3432 case DIF_OP_SRL: 3433 regs[rd] = regs[r1] >> regs[r2]; 3434 break; 3435 case DIF_OP_SUB: 3436 regs[rd] = regs[r1] - regs[r2]; 3437 break; 3438 case DIF_OP_ADD: 3439 regs[rd] = regs[r1] + regs[r2]; 3440 break; 3441 case DIF_OP_MUL: 3442 regs[rd] = regs[r1] * regs[r2]; 3443 break; 3444 case DIF_OP_SDIV: 3445 if (regs[r2] == 0) { 3446 regs[rd] = 0; 3447 *flags |= CPU_DTRACE_DIVZERO; 3448 } else { 3449 regs[rd] = (int64_t)regs[r1] / 3450 (int64_t)regs[r2]; 3451 } 3452 break; 3453 3454 case DIF_OP_UDIV: 3455 if (regs[r2] == 0) { 3456 regs[rd] = 0; 3457 *flags |= CPU_DTRACE_DIVZERO; 3458 } else { 3459 regs[rd] = regs[r1] / regs[r2]; 3460 } 3461 break; 3462 
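		/*
		 * (Note, by way of illustration: a D expression that
		 * divides by zero thus never traps -- the destination
		 * register is zeroed and CPU_DTRACE_DIVZERO is raised, to
		 * be reported as a runtime error rather than suffered as a
		 * hardware exception. The remainder operations below
		 * follow the same pattern.)
		 */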
3463 case DIF_OP_SREM: 3464 if (regs[r2] == 0) { 3465 regs[rd] = 0; 3466 *flags |= CPU_DTRACE_DIVZERO; 3467 } else { 3468 regs[rd] = (int64_t)regs[r1] % 3469 (int64_t)regs[r2]; 3470 } 3471 break; 3472 3473 case DIF_OP_UREM: 3474 if (regs[r2] == 0) { 3475 regs[rd] = 0; 3476 *flags |= CPU_DTRACE_DIVZERO; 3477 } else { 3478 regs[rd] = regs[r1] % regs[r2]; 3479 } 3480 break; 3481 3482 case DIF_OP_NOT: 3483 regs[rd] = ~regs[r1]; 3484 break; 3485 case DIF_OP_MOV: 3486 regs[rd] = regs[r1]; 3487 break; 3488 case DIF_OP_CMP: 3489 cc_r = regs[r1] - regs[r2]; 3490 cc_n = cc_r < 0; 3491 cc_z = cc_r == 0; 3492 cc_v = 0; 3493 cc_c = regs[r1] < regs[r2]; 3494 break; 3495 case DIF_OP_TST: 3496 cc_n = cc_v = cc_c = 0; 3497 cc_z = regs[r1] == 0; 3498 break; 3499 case DIF_OP_BA: 3500 pc = DIF_INSTR_LABEL(instr); 3501 break; 3502 case DIF_OP_BE: 3503 if (cc_z) 3504 pc = DIF_INSTR_LABEL(instr); 3505 break; 3506 case DIF_OP_BNE: 3507 if (cc_z == 0) 3508 pc = DIF_INSTR_LABEL(instr); 3509 break; 3510 case DIF_OP_BG: 3511 if ((cc_z | (cc_n ^ cc_v)) == 0) 3512 pc = DIF_INSTR_LABEL(instr); 3513 break; 3514 case DIF_OP_BGU: 3515 if ((cc_c | cc_z) == 0) 3516 pc = DIF_INSTR_LABEL(instr); 3517 break; 3518 case DIF_OP_BGE: 3519 if ((cc_n ^ cc_v) == 0) 3520 pc = DIF_INSTR_LABEL(instr); 3521 break; 3522 case DIF_OP_BGEU: 3523 if (cc_c == 0) 3524 pc = DIF_INSTR_LABEL(instr); 3525 break; 3526 case DIF_OP_BL: 3527 if (cc_n ^ cc_v) 3528 pc = DIF_INSTR_LABEL(instr); 3529 break; 3530 case DIF_OP_BLU: 3531 if (cc_c) 3532 pc = DIF_INSTR_LABEL(instr); 3533 break; 3534 case DIF_OP_BLE: 3535 if (cc_z | (cc_n ^ cc_v)) 3536 pc = DIF_INSTR_LABEL(instr); 3537 break; 3538 case DIF_OP_BLEU: 3539 if (cc_c | cc_z) 3540 pc = DIF_INSTR_LABEL(instr); 3541 break; 3542 case DIF_OP_RLDSB: 3543 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3544 *flags |= CPU_DTRACE_KPRIV; 3545 *illval = regs[r1]; 3546 break; 3547 } 3548 /*FALLTHROUGH*/ 3549 case DIF_OP_LDSB: 3550 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 3551 break; 3552 case DIF_OP_RLDSH: 3553 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3554 *flags |= CPU_DTRACE_KPRIV; 3555 *illval = regs[r1]; 3556 break; 3557 } 3558 /*FALLTHROUGH*/ 3559 case DIF_OP_LDSH: 3560 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 3561 break; 3562 case DIF_OP_RLDSW: 3563 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3564 *flags |= CPU_DTRACE_KPRIV; 3565 *illval = regs[r1]; 3566 break; 3567 } 3568 /*FALLTHROUGH*/ 3569 case DIF_OP_LDSW: 3570 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 3571 break; 3572 case DIF_OP_RLDUB: 3573 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3574 *flags |= CPU_DTRACE_KPRIV; 3575 *illval = regs[r1]; 3576 break; 3577 } 3578 /*FALLTHROUGH*/ 3579 case DIF_OP_LDUB: 3580 regs[rd] = dtrace_load8(regs[r1]); 3581 break; 3582 case DIF_OP_RLDUH: 3583 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3584 *flags |= CPU_DTRACE_KPRIV; 3585 *illval = regs[r1]; 3586 break; 3587 } 3588 /*FALLTHROUGH*/ 3589 case DIF_OP_LDUH: 3590 regs[rd] = dtrace_load16(regs[r1]); 3591 break; 3592 case DIF_OP_RLDUW: 3593 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3594 *flags |= CPU_DTRACE_KPRIV; 3595 *illval = regs[r1]; 3596 break; 3597 } 3598 /*FALLTHROUGH*/ 3599 case DIF_OP_LDUW: 3600 regs[rd] = dtrace_load32(regs[r1]); 3601 break; 3602 case DIF_OP_RLDX: 3603 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 3604 *flags |= CPU_DTRACE_KPRIV; 3605 *illval = regs[r1]; 3606 break; 3607 } 3608 /*FALLTHROUGH*/ 3609 case DIF_OP_LDX: 3610 regs[rd] = dtrace_load64(regs[r1]); 3611 break; 
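		/*
		 * (The ULD* variants below are the user-space analogues of
		 * the LD* loads above: they fetch through dtrace_fuword*()
		 * rather than dtrace_load*(), and therefore through the
		 * address space of the current process rather than that of
		 * the kernel.)
		 */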
3612 case DIF_OP_ULDSB: 3613 regs[rd] = (int8_t) 3614 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3615 break; 3616 case DIF_OP_ULDSH: 3617 regs[rd] = (int16_t) 3618 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3619 break; 3620 case DIF_OP_ULDSW: 3621 regs[rd] = (int32_t) 3622 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3623 break; 3624 case DIF_OP_ULDUB: 3625 regs[rd] = 3626 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3627 break; 3628 case DIF_OP_ULDUH: 3629 regs[rd] = 3630 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3631 break; 3632 case DIF_OP_ULDUW: 3633 regs[rd] = 3634 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3635 break; 3636 case DIF_OP_ULDX: 3637 regs[rd] = 3638 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 3639 break; 3640 case DIF_OP_RET: 3641 rval = regs[rd]; 3642 break; 3643 case DIF_OP_NOP: 3644 break; 3645 case DIF_OP_SETX: 3646 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 3647 break; 3648 case DIF_OP_SETS: 3649 regs[rd] = (uint64_t)(uintptr_t) 3650 (strtab + DIF_INSTR_STRING(instr)); 3651 break; 3652 case DIF_OP_SCMP: 3653 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1], 3654 (char *)(uintptr_t)regs[r2], 3655 state->dts_options[DTRACEOPT_STRSIZE]); 3656 3657 cc_n = cc_r < 0; 3658 cc_z = cc_r == 0; 3659 cc_v = cc_c = 0; 3660 break; 3661 case DIF_OP_LDGA: 3662 regs[rd] = dtrace_dif_variable(mstate, state, 3663 r1, regs[r2]); 3664 break; 3665 case DIF_OP_LDGS: 3666 id = DIF_INSTR_VAR(instr); 3667 3668 if (id >= DIF_VAR_OTHER_UBASE) { 3669 uintptr_t a; 3670 3671 id -= DIF_VAR_OTHER_UBASE; 3672 svar = vstate->dtvs_globals[id]; 3673 ASSERT(svar != NULL); 3674 v = &svar->dtsv_var; 3675 3676 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 3677 regs[rd] = svar->dtsv_data; 3678 break; 3679 } 3680 3681 a = (uintptr_t)svar->dtsv_data; 3682 3683 if (*(uint8_t *)a == UINT8_MAX) { 3684 /* 3685 * If the 0th byte is set to UINT8_MAX 3686 * then this is to be treated as a 3687 * reference to a NULL variable. 3688 */ 3689 regs[rd] = NULL; 3690 } else { 3691 regs[rd] = a + sizeof (uint64_t); 3692 } 3693 3694 break; 3695 } 3696 3697 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 3698 break; 3699 3700 case DIF_OP_STGS: 3701 id = DIF_INSTR_VAR(instr); 3702 3703 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3704 id -= DIF_VAR_OTHER_UBASE; 3705 3706 svar = vstate->dtvs_globals[id]; 3707 ASSERT(svar != NULL); 3708 v = &svar->dtsv_var; 3709 3710 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3711 uintptr_t a = (uintptr_t)svar->dtsv_data; 3712 3713 ASSERT(a != NULL); 3714 ASSERT(svar->dtsv_size != 0); 3715 3716 if (regs[rd] == NULL) { 3717 *(uint8_t *)a = UINT8_MAX; 3718 break; 3719 } else { 3720 *(uint8_t *)a = 0; 3721 a += sizeof (uint64_t); 3722 } 3723 3724 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3725 (void *)a, &v->dtdv_type); 3726 break; 3727 } 3728 3729 svar->dtsv_data = regs[rd]; 3730 break; 3731 3732 case DIF_OP_LDTA: 3733 /* 3734 * There are no DTrace built-in thread-local arrays at 3735 * present. This opcode is saved for future work. 3736 */ 3737 *flags |= CPU_DTRACE_ILLOP; 3738 regs[rd] = 0; 3739 break; 3740 3741 case DIF_OP_LDLS: 3742 id = DIF_INSTR_VAR(instr); 3743 3744 if (id < DIF_VAR_OTHER_UBASE) { 3745 /* 3746 * For now, this has no meaning. 
3747 */ 3748 regs[rd] = 0; 3749 break; 3750 } 3751 3752 id -= DIF_VAR_OTHER_UBASE; 3753 3754 ASSERT(id < vstate->dtvs_nlocals); 3755 ASSERT(vstate->dtvs_locals != NULL); 3756 3757 svar = vstate->dtvs_locals[id]; 3758 ASSERT(svar != NULL); 3759 v = &svar->dtsv_var; 3760 3761 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3762 uintptr_t a = (uintptr_t)svar->dtsv_data; 3763 size_t sz = v->dtdv_type.dtdt_size; 3764 3765 sz += sizeof (uint64_t); 3766 ASSERT(svar->dtsv_size == NCPU * sz); 3767 a += CPU->cpu_id * sz; 3768 3769 if (*(uint8_t *)a == UINT8_MAX) { 3770 /* 3771 * If the 0th byte is set to UINT8_MAX 3772 * then this is to be treated as a 3773 * reference to a NULL variable. 3774 */ 3775 regs[rd] = NULL; 3776 } else { 3777 regs[rd] = a + sizeof (uint64_t); 3778 } 3779 3780 break; 3781 } 3782 3783 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3784 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 3785 regs[rd] = tmp[CPU->cpu_id]; 3786 break; 3787 3788 case DIF_OP_STLS: 3789 id = DIF_INSTR_VAR(instr); 3790 3791 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3792 id -= DIF_VAR_OTHER_UBASE; 3793 ASSERT(id < vstate->dtvs_nlocals); 3794 3795 ASSERT(vstate->dtvs_locals != NULL); 3796 svar = vstate->dtvs_locals[id]; 3797 ASSERT(svar != NULL); 3798 v = &svar->dtsv_var; 3799 3800 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3801 uintptr_t a = (uintptr_t)svar->dtsv_data; 3802 size_t sz = v->dtdv_type.dtdt_size; 3803 3804 sz += sizeof (uint64_t); 3805 ASSERT(svar->dtsv_size == NCPU * sz); 3806 a += CPU->cpu_id * sz; 3807 3808 if (regs[rd] == NULL) { 3809 *(uint8_t *)a = UINT8_MAX; 3810 break; 3811 } else { 3812 *(uint8_t *)a = 0; 3813 a += sizeof (uint64_t); 3814 } 3815 3816 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3817 (void *)a, &v->dtdv_type); 3818 break; 3819 } 3820 3821 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3822 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 3823 tmp[CPU->cpu_id] = regs[rd]; 3824 break; 3825 3826 case DIF_OP_LDTS: { 3827 dtrace_dynvar_t *dvar; 3828 dtrace_key_t *key; 3829 3830 id = DIF_INSTR_VAR(instr); 3831 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3832 id -= DIF_VAR_OTHER_UBASE; 3833 v = &vstate->dtvs_tlocals[id]; 3834 3835 key = &tupregs[DIF_DTR_NREGS]; 3836 key[0].dttk_value = (uint64_t)id; 3837 key[0].dttk_size = 0; 3838 DTRACE_TLS_THRKEY(key[1].dttk_value); 3839 key[1].dttk_size = 0; 3840 3841 dvar = dtrace_dynvar(dstate, 2, key, 3842 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC); 3843 3844 if (dvar == NULL) { 3845 regs[rd] = 0; 3846 break; 3847 } 3848 3849 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3850 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 3851 } else { 3852 regs[rd] = *((uint64_t *)dvar->dtdv_data); 3853 } 3854 3855 break; 3856 } 3857 3858 case DIF_OP_STTS: { 3859 dtrace_dynvar_t *dvar; 3860 dtrace_key_t *key; 3861 3862 id = DIF_INSTR_VAR(instr); 3863 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3864 id -= DIF_VAR_OTHER_UBASE; 3865 3866 key = &tupregs[DIF_DTR_NREGS]; 3867 key[0].dttk_value = (uint64_t)id; 3868 key[0].dttk_size = 0; 3869 DTRACE_TLS_THRKEY(key[1].dttk_value); 3870 key[1].dttk_size = 0; 3871 v = &vstate->dtvs_tlocals[id]; 3872 3873 dvar = dtrace_dynvar(dstate, 2, key, 3874 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 3875 v->dtdv_type.dtdt_size : sizeof (uint64_t), 3876 regs[rd] ? DTRACE_DYNVAR_ALLOC : 3877 DTRACE_DYNVAR_DEALLOC); 3878 3879 /* 3880 * Given that we're storing to thread-local data, 3881 * we need to flush our predicate cache. 
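 *
 * (Were we not to do so, a cached predicate evaluation could be
 * short-circuited against a stale value of the very thread-local
 * variable being stored here.)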
3882 */ 3883 curthread->t_predcache = NULL; 3884 3885 if (dvar == NULL) 3886 break; 3887 3888 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3889 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3890 dvar->dtdv_data, &v->dtdv_type); 3891 } else { 3892 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 3893 } 3894 3895 break; 3896 } 3897 3898 case DIF_OP_SRA: 3899 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 3900 break; 3901 3902 case DIF_OP_CALL: 3903 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 3904 regs, tupregs, ttop, mstate, state); 3905 break; 3906 3907 case DIF_OP_PUSHTR: 3908 if (ttop == DIF_DTR_NREGS) { 3909 *flags |= CPU_DTRACE_TUPOFLOW; 3910 break; 3911 } 3912 3913 if (r1 == DIF_TYPE_STRING) { 3914 /* 3915 * If this is a string type and the size is 0, 3916 * we'll use the system-wide default string 3917 * size. Note that we are _not_ looking at 3918 * the value of the DTRACEOPT_STRSIZE option; 3919 * had this been set, we would expect to have 3920 * a non-zero size value in the "pushtr". 3921 */ 3922 tupregs[ttop].dttk_size = 3923 dtrace_strlen((char *)(uintptr_t)regs[rd], 3924 regs[r2] ? regs[r2] : 3925 dtrace_strsize_default) + 1; 3926 } else { 3927 tupregs[ttop].dttk_size = regs[r2]; 3928 } 3929 3930 tupregs[ttop++].dttk_value = regs[rd]; 3931 break; 3932 3933 case DIF_OP_PUSHTV: 3934 if (ttop == DIF_DTR_NREGS) { 3935 *flags |= CPU_DTRACE_TUPOFLOW; 3936 break; 3937 } 3938 3939 tupregs[ttop].dttk_value = regs[rd]; 3940 tupregs[ttop++].dttk_size = 0; 3941 break; 3942 3943 case DIF_OP_POPTS: 3944 if (ttop != 0) 3945 ttop--; 3946 break; 3947 3948 case DIF_OP_FLUSHTS: 3949 ttop = 0; 3950 break; 3951 3952 case DIF_OP_LDGAA: 3953 case DIF_OP_LDTAA: { 3954 dtrace_dynvar_t *dvar; 3955 dtrace_key_t *key = tupregs; 3956 uint_t nkeys = ttop; 3957 3958 id = DIF_INSTR_VAR(instr); 3959 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3960 id -= DIF_VAR_OTHER_UBASE; 3961 3962 key[nkeys].dttk_value = (uint64_t)id; 3963 key[nkeys++].dttk_size = 0; 3964 3965 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 3966 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 3967 key[nkeys++].dttk_size = 0; 3968 v = &vstate->dtvs_tlocals[id]; 3969 } else { 3970 v = &vstate->dtvs_globals[id]->dtsv_var; 3971 } 3972 3973 dvar = dtrace_dynvar(dstate, nkeys, key, 3974 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 3975 v->dtdv_type.dtdt_size : sizeof (uint64_t), 3976 DTRACE_DYNVAR_NOALLOC); 3977 3978 if (dvar == NULL) { 3979 regs[rd] = 0; 3980 break; 3981 } 3982 3983 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3984 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 3985 } else { 3986 regs[rd] = *((uint64_t *)dvar->dtdv_data); 3987 } 3988 3989 break; 3990 } 3991 3992 case DIF_OP_STGAA: 3993 case DIF_OP_STTAA: { 3994 dtrace_dynvar_t *dvar; 3995 dtrace_key_t *key = tupregs; 3996 uint_t nkeys = ttop; 3997 3998 id = DIF_INSTR_VAR(instr); 3999 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4000 id -= DIF_VAR_OTHER_UBASE; 4001 4002 key[nkeys].dttk_value = (uint64_t)id; 4003 key[nkeys++].dttk_size = 0; 4004 4005 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4006 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4007 key[nkeys++].dttk_size = 0; 4008 v = &vstate->dtvs_tlocals[id]; 4009 } else { 4010 v = &vstate->dtvs_globals[id]->dtsv_var; 4011 } 4012 4013 dvar = dtrace_dynvar(dstate, nkeys, key, 4014 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4015 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4016 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4017 DTRACE_DYNVAR_DEALLOC); 4018 4019 if (dvar == NULL) 4020 break; 4021 4022 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4023 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4024 dvar->dtdv_data, &v->dtdv_type); 4025 } else { 4026 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4027 } 4028 4029 break; 4030 } 4031 4032 case DIF_OP_ALLOCS: { 4033 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4034 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4035 4036 if (mstate->dtms_scratch_ptr + size > 4037 mstate->dtms_scratch_base + 4038 mstate->dtms_scratch_size) { 4039 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4040 regs[rd] = NULL; 4041 } else { 4042 dtrace_bzero((void *) 4043 mstate->dtms_scratch_ptr, size); 4044 mstate->dtms_scratch_ptr += size; 4045 regs[rd] = ptr; 4046 } 4047 break; 4048 } 4049 4050 case DIF_OP_COPYS: 4051 if (!dtrace_canstore(regs[rd], regs[r2], 4052 mstate, vstate)) { 4053 *flags |= CPU_DTRACE_BADADDR; 4054 *illval = regs[rd]; 4055 break; 4056 } 4057 4058 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4059 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4060 break; 4061 4062 case DIF_OP_STB: 4063 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4064 *flags |= CPU_DTRACE_BADADDR; 4065 *illval = regs[rd]; 4066 break; 4067 } 4068 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4069 break; 4070 4071 case DIF_OP_STH: 4072 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4073 *flags |= CPU_DTRACE_BADADDR; 4074 *illval = regs[rd]; 4075 break; 4076 } 4077 if (regs[rd] & 1) { 4078 *flags |= CPU_DTRACE_BADALIGN; 4079 *illval = regs[rd]; 4080 break; 4081 } 4082 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4083 break; 4084 4085 case DIF_OP_STW: 4086 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4087 *flags |= CPU_DTRACE_BADADDR; 4088 *illval = regs[rd]; 4089 break; 4090 } 4091 if (regs[rd] & 3) { 4092 *flags |= CPU_DTRACE_BADALIGN; 4093 *illval = regs[rd]; 4094 break; 4095 } 4096 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4097 break; 4098 4099 case DIF_OP_STX: 4100 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4101 *flags |= CPU_DTRACE_BADADDR; 4102 *illval = regs[rd]; 4103 break; 4104 } 4105 if (regs[rd] & 7) { 4106 *flags |= CPU_DTRACE_BADALIGN; 4107 *illval = regs[rd]; 4108 break; 4109 } 4110 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4111 break; 4112 } 4113 } 4114 4115 if (!(*flags & CPU_DTRACE_FAULT)) 4116 return (rval); 4117 4118 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4119 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4120 4121 return (0); 4122 } 4123 4124 static void 4125 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4126 { 4127 dtrace_probe_t *probe = ecb->dte_probe; 4128 dtrace_provider_t *prov = probe->dtpr_provider; 4129 char c[DTRACE_FULLNAMELEN + 80], *str; 4130 char *msg = "dtrace: breakpoint action at probe "; 4131 char *ecbmsg = " (ecb "; 4132 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4133 uintptr_t val = (uintptr_t)ecb; 4134 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4135 4136 if (dtrace_destructive_disallow) 4137 return; 4138 4139 /* 4140 * It's impossible to be taking action on the NULL probe. 4141 */ 4142 ASSERT(probe != NULL); 4143 4144 /* 4145 * This is a poor man's (destitute man's?) sprintf(): we want to 4146 * print the provider name, module name, function name and name of 4147 * the probe, along with the hex address of the ECB with the breakpoint 4148 * action -- all of which we must place in the character buffer by 4149 * hand. 
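 * As a rough illustration (all values invented, not from a real
 * system), the assembled message looks like:
 *
 *	dtrace: breakpoint action at probe fbt:genunix:kmem_alloc:entry (ecb 30001234560)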
4150 */ 4151 while (*msg != '\0') 4152 c[i++] = *msg++; 4153 4154 for (str = prov->dtpv_name; *str != '\0'; str++) 4155 c[i++] = *str; 4156 c[i++] = ':'; 4157 4158 for (str = probe->dtpr_mod; *str != '\0'; str++) 4159 c[i++] = *str; 4160 c[i++] = ':'; 4161 4162 for (str = probe->dtpr_func; *str != '\0'; str++) 4163 c[i++] = *str; 4164 c[i++] = ':'; 4165 4166 for (str = probe->dtpr_name; *str != '\0'; str++) 4167 c[i++] = *str; 4168 4169 while (*ecbmsg != '\0') 4170 c[i++] = *ecbmsg++; 4171 4172 while (shift >= 0) { 4173 mask = (uintptr_t)0xf << shift; 4174 4175 if (val >= ((uintptr_t)1 << shift)) 4176 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4177 shift -= 4; 4178 } 4179 4180 c[i++] = ')'; 4181 c[i] = '\0'; 4182 4183 debug_enter(c); 4184 } 4185 4186 static void 4187 dtrace_action_panic(dtrace_ecb_t *ecb) 4188 { 4189 dtrace_probe_t *probe = ecb->dte_probe; 4190 4191 /* 4192 * It's impossible to be taking action on the NULL probe. 4193 */ 4194 ASSERT(probe != NULL); 4195 4196 if (dtrace_destructive_disallow) 4197 return; 4198 4199 if (dtrace_panicked != NULL) 4200 return; 4201 4202 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4203 return; 4204 4205 /* 4206 * We won the right to panic. (We want to be sure that only one 4207 * thread calls panic() from dtrace_probe(), and that panic() is 4208 * called exactly once.) 4209 */ 4210 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4211 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4212 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4213 } 4214 4215 static void 4216 dtrace_action_raise(uint64_t sig) 4217 { 4218 if (dtrace_destructive_disallow) 4219 return; 4220 4221 if (sig >= NSIG) { 4222 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4223 return; 4224 } 4225 4226 /* 4227 * raise() has a queue depth of 1 -- we ignore all subsequent 4228 * invocations of the raise() action. 4229 */ 4230 if (curthread->t_dtrace_sig == 0) 4231 curthread->t_dtrace_sig = (uint8_t)sig; 4232 4233 curthread->t_sig_check = 1; 4234 aston(curthread); 4235 } 4236 4237 static void 4238 dtrace_action_stop(void) 4239 { 4240 if (dtrace_destructive_disallow) 4241 return; 4242 4243 if (!curthread->t_dtrace_stop) { 4244 curthread->t_dtrace_stop = 1; 4245 curthread->t_sig_check = 1; 4246 aston(curthread); 4247 } 4248 } 4249 4250 static void 4251 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4252 { 4253 hrtime_t now; 4254 volatile uint16_t *flags; 4255 cpu_t *cpu = CPU; 4256 4257 if (dtrace_destructive_disallow) 4258 return; 4259 4260 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4261 4262 now = dtrace_gethrtime(); 4263 4264 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4265 /* 4266 * We need to advance the mark to the current time. 4267 */ 4268 cpu->cpu_dtrace_chillmark = now; 4269 cpu->cpu_dtrace_chilled = 0; 4270 } 4271 4272 /* 4273 * Now check to see if the requested chill time would take us over 4274 * the maximum amount of time allowed in the chill interval. (Or 4275 * worse, if the calculation itself induces overflow.) 4276 */ 4277 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4278 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4279 *flags |= CPU_DTRACE_ILLOP; 4280 return; 4281 } 4282 4283 while (dtrace_gethrtime() - now < val) 4284 continue; 4285 4286 /* 4287 * Normally, we assure that the value of the variable "timestamp" does 4288 * not change within an ECB. The presence of chill() represents an 4289 * exception to this rule, however. 
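 * (As an illustrative D sketch -- not from the original source -- a
 * clause such as:
 *
 *	syscall::read:entry { chill(500000); trace(timestamp); }
 *
 * should observe a post-chill timestamp, which is why the
 * DTRACE_MSTATE_TIMESTAMP bit is cleared below.)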
4290 */ 4291 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 4292 cpu->cpu_dtrace_chilled += val; 4293 } 4294 4295 static void 4296 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 4297 uint64_t *buf, uint64_t arg) 4298 { 4299 int nframes = DTRACE_USTACK_NFRAMES(arg); 4300 int strsize = DTRACE_USTACK_STRSIZE(arg); 4301 uint64_t *pcs = &buf[1], *fps; 4302 char *str = (char *)&pcs[nframes]; 4303 int size, offs = 0, i, j; 4304 uintptr_t old = mstate->dtms_scratch_ptr, saved; 4305 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4306 char *sym; 4307 4308 /* 4309 * Should be taking a faster path if string space has not been 4310 * allocated. 4311 */ 4312 ASSERT(strsize != 0); 4313 4314 /* 4315 * We will first allocate some temporary space for the frame pointers. 4316 */ 4317 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4318 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 4319 (nframes * sizeof (uint64_t)); 4320 4321 if (mstate->dtms_scratch_ptr + size > 4322 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 4323 /* 4324 * Not enough room for our frame pointers -- need to indicate 4325 * that we ran out of scratch space. 4326 */ 4327 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4328 return; 4329 } 4330 4331 mstate->dtms_scratch_ptr += size; 4332 saved = mstate->dtms_scratch_ptr; 4333 4334 /* 4335 * Now get a stack with both program counters and frame pointers. 4336 */ 4337 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4338 dtrace_getufpstack(buf, fps, nframes + 1); 4339 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4340 4341 /* 4342 * If that faulted, we're cooked. 4343 */ 4344 if (*flags & CPU_DTRACE_FAULT) 4345 goto out; 4346 4347 /* 4348 * Now we want to walk up the stack, calling the USTACK helper. For 4349 * each iteration, we restore the scratch pointer. 4350 */ 4351 for (i = 0; i < nframes; i++) { 4352 mstate->dtms_scratch_ptr = saved; 4353 4354 if (offs >= strsize) 4355 break; 4356 4357 sym = (char *)(uintptr_t)dtrace_helper( 4358 DTRACE_HELPER_ACTION_USTACK, 4359 mstate, state, pcs[i], fps[i]); 4360 4361 /* 4362 * If we faulted while running the helper, we're going to 4363 * clear the fault and null out the corresponding string. 4364 */ 4365 if (*flags & CPU_DTRACE_FAULT) { 4366 *flags &= ~CPU_DTRACE_FAULT; 4367 str[offs++] = '\0'; 4368 continue; 4369 } 4370 4371 if (sym == NULL) { 4372 str[offs++] = '\0'; 4373 continue; 4374 } 4375 4376 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4377 4378 /* 4379 * Now copy in the string that the helper returned to us. 4380 */ 4381 for (j = 0; offs + j < strsize; j++) { 4382 if ((str[offs + j] = sym[j]) == '\0') 4383 break; 4384 } 4385 4386 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4387 4388 /* 4389 * If we didn't have room for all of the last string, break 4390 * out -- the loop at the end will take care of zeroing the 4391 * remainder of the string table. 4392 */ 4393 if (offs + j >= strsize) 4394 break; 4395 4396 offs += j + 1; 4397 } 4398 4399 while (offs < strsize) 4400 str[offs++] = '\0'; 4401 4402 out: 4403 mstate->dtms_scratch_ptr = old; 4404 } 4405 4406 /* 4407 * If you're looking for the epicenter of DTrace, you just found it. This 4408 * is the function called by the provider to fire a probe -- from which all 4409 * subsequent probe-context DTrace activity emanates.
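 * A provider fires a probe by calling this function directly; a minimal
 * (illustrative) call from provider code is simply:
 *
 *	dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
 *
 * where id is the identifier handed back by dtrace_probe_create().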
4410 */ 4411 void 4412 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 4413 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 4414 { 4415 processorid_t cpuid; 4416 dtrace_icookie_t cookie; 4417 dtrace_probe_t *probe; 4418 dtrace_mstate_t mstate; 4419 dtrace_ecb_t *ecb; 4420 dtrace_action_t *act; 4421 intptr_t offs; 4422 size_t size; 4423 int vtime, onintr; 4424 volatile uint16_t *flags; 4425 hrtime_t now; 4426 4427 /* 4428 * Kick out immediately if this CPU is still being born (in which case 4429 * curthread will be set to -1) 4430 */ 4431 if ((uintptr_t)curthread & 1) 4432 return; 4433 4434 cookie = dtrace_interrupt_disable(); 4435 probe = dtrace_probes[id - 1]; 4436 cpuid = CPU->cpu_id; 4437 onintr = CPU_ON_INTR(CPU); 4438 4439 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 4440 probe->dtpr_predcache == curthread->t_predcache) { 4441 /* 4442 * We have hit in the predicate cache; we know that 4443 * this predicate would evaluate to be false. 4444 */ 4445 dtrace_interrupt_enable(cookie); 4446 return; 4447 } 4448 4449 if (panic_quiesce) { 4450 /* 4451 * We don't trace anything if we're panicking. 4452 */ 4453 dtrace_interrupt_enable(cookie); 4454 return; 4455 } 4456 4457 now = dtrace_gethrtime(); 4458 vtime = dtrace_vtime_references != 0; 4459 4460 if (vtime && curthread->t_dtrace_start) 4461 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 4462 4463 mstate.dtms_probe = probe; 4464 mstate.dtms_arg[0] = arg0; 4465 mstate.dtms_arg[1] = arg1; 4466 mstate.dtms_arg[2] = arg2; 4467 mstate.dtms_arg[3] = arg3; 4468 mstate.dtms_arg[4] = arg4; 4469 4470 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 4471 4472 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 4473 dtrace_predicate_t *pred = ecb->dte_predicate; 4474 dtrace_state_t *state = ecb->dte_state; 4475 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 4476 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 4477 dtrace_vstate_t *vstate = &state->dts_vstate; 4478 dtrace_provider_t *prov = probe->dtpr_provider; 4479 int committed = 0; 4480 caddr_t tomax; 4481 4482 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 4483 *flags &= ~CPU_DTRACE_ERROR; 4484 4485 if (prov == dtrace_provider) { 4486 /* 4487 * If dtrace itself is the provider of this probe, 4488 * we're only going to continue processing the ECB if 4489 * arg0 (the dtrace_state_t) is equal to the ECB's 4490 * creating state. (This prevents disjoint consumers 4491 * from seeing one another's metaprobes.) 4492 */ 4493 if (arg0 != (uint64_t)(uintptr_t)state) 4494 continue; 4495 } 4496 4497 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 4498 /* 4499 * We're not currently active. If our provider isn't 4500 * the dtrace pseudo provider, we're not interested. 4501 */ 4502 if (prov != dtrace_provider) 4503 continue; 4504 4505 /* 4506 * Now we must further check if we are in the BEGIN 4507 * probe. If we are, we will only continue processing 4508 * if we're still in WARMUP -- if one BEGIN enabling 4509 * has invoked the exit() action, we don't want to 4510 * evaluate subsequent BEGIN enablings. 
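 * As an illustrative example, given two enablings:
 *
 *	BEGIN { exit(0); }
 *	BEGIN { trace(pid); }
 *
 * the exit() in the first moves the state from WARMUP to DRAINING,
 * and the second BEGIN enabling is never evaluated.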
4511 */ 4512 if (probe->dtpr_id == dtrace_probeid_begin && 4513 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 4514 ASSERT(state->dts_activity == 4515 DTRACE_ACTIVITY_DRAINING); 4516 continue; 4517 } 4518 } 4519 4520 if (ecb->dte_cond) { 4521 /* 4522 * If the dte_cond bits indicate that this 4523 * consumer is only allowed to see user-mode firings 4524 * of this probe, call the provider's dtps_usermode() 4525 * entry point to check that the probe was fired 4526 * while in a user context. Skip this ECB if that's 4527 * not the case. 4528 */ 4529 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 4530 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 4531 probe->dtpr_id, probe->dtpr_arg) == 0) 4532 continue; 4533 4534 /* 4535 * This is more subtle than it looks. We have to be 4536 * absolutely certain that CRED() isn't going to 4537 * change out from under us, so it's only legit to 4538 * examine that structure if we're in constrained 4539 * situations. Currently, the only time we'll do this 4540 * check is if a non-super-user has enabled the 4541 * profile or syscall providers -- providers that 4542 * allow visibility of all processes. For the 4543 * profile case, the check above will ensure that 4544 * we're examining a user context. 4545 */ 4546 if (ecb->dte_cond & DTRACE_COND_OWNER) { 4547 uid_t uid = ecb->dte_state->dts_cred.dcr_uid; 4548 gid_t gid = ecb->dte_state->dts_cred.dcr_gid; 4549 cred_t *cr; 4550 proc_t *proc; 4551 4552 if ((cr = CRED()) == NULL || 4553 uid != cr->cr_uid || 4554 uid != cr->cr_ruid || 4555 uid != cr->cr_suid || 4556 gid != cr->cr_gid || 4557 gid != cr->cr_rgid || 4558 gid != cr->cr_sgid || 4559 (proc = ttoproc(curthread)) == NULL || 4560 (proc->p_flag & SNOCD)) 4561 continue; 4562 4563 } 4564 } 4565 4566 if (now - state->dts_alive > dtrace_deadman_timeout) { 4567 /* 4568 * We seem to be dead. Unless we (a) have kernel 4569 * destructive permissions, (b) have explicitly enabled 4570 * destructive actions, and (c) destructive actions have 4571 * not been disabled, we're going to transition into 4572 * the KILLED state, from which no further processing 4573 * on this state will be performed. 4574 */ 4575 if (!dtrace_priv_kernel_destructive(state) || 4576 !state->dts_cred.dcr_destructive || 4577 dtrace_destructive_disallow) { 4578 void *activity = &state->dts_activity; 4579 dtrace_activity_t current; 4580 4581 do { 4582 current = state->dts_activity; 4583 } while (dtrace_cas32(activity, current, 4584 DTRACE_ACTIVITY_KILLED) != current); 4585 4586 continue; 4587 } 4588 } 4589 4590 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 4591 ecb->dte_alignment, state, &mstate)) < 0) 4592 continue; 4593 4594 tomax = buf->dtb_tomax; 4595 ASSERT(tomax != NULL); 4596 4597 if (ecb->dte_size != 0) 4598 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 4599 4600 mstate.dtms_epid = ecb->dte_epid; 4601 mstate.dtms_present |= DTRACE_MSTATE_EPID; 4602 4603 if (pred != NULL) { 4604 dtrace_difo_t *dp = pred->dtp_difo; 4605 int rval; 4606 4607 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 4608 4609 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 4610 dtrace_cacheid_t cid = probe->dtpr_predcache; 4611 4612 if (cid != DTRACE_CACHEIDNONE && !onintr) { 4613 /* 4614 * Update the predicate cache...
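 * by recording this predicate's cache ID in t_predcache;
 * the next firing of this probe on this thread can then
 * bail out in the cache check at the top of this function,
 * before any buffer space is reserved.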
4615 */ 4616 ASSERT(cid == pred->dtp_cacheid); 4617 curthread->t_predcache = cid; 4618 } 4619 4620 continue; 4621 } 4622 } 4623 4624 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 4625 act != NULL; act = act->dta_next) { 4626 uint64_t val; 4627 size_t valoffs; 4628 dtrace_difo_t *dp; 4629 dtrace_recdesc_t *rec = &act->dta_rec; 4630 4631 size = rec->dtrd_size; 4632 valoffs = offs + rec->dtrd_offset; 4633 4634 if (DTRACEACT_ISAGG(act->dta_kind)) { 4635 uint64_t v = 0xbad; 4636 dtrace_aggregation_t *agg; 4637 4638 agg = (dtrace_aggregation_t *)act; 4639 4640 if ((dp = act->dta_difo) != NULL) 4641 v = dtrace_dif_emulate(dp, 4642 &mstate, vstate, state); 4643 4644 if (*flags & CPU_DTRACE_ERROR) 4645 continue; 4646 4647 dtrace_aggregate(agg, buf, offs, aggbuf, v); 4648 continue; 4649 } 4650 4651 switch (act->dta_kind) { 4652 case DTRACEACT_STOP: 4653 if (dtrace_priv_proc_destructive(state)) 4654 dtrace_action_stop(); 4655 continue; 4656 4657 case DTRACEACT_BREAKPOINT: 4658 if (dtrace_priv_kernel_destructive(state)) 4659 dtrace_action_breakpoint(ecb); 4660 continue; 4661 4662 case DTRACEACT_PANIC: 4663 if (dtrace_priv_kernel_destructive(state)) 4664 dtrace_action_panic(ecb); 4665 continue; 4666 4667 case DTRACEACT_STACK: 4668 if (!dtrace_priv_kernel(state)) 4669 continue; 4670 4671 dtrace_getpcstack((pc_t *)(tomax + valoffs), 4672 size / sizeof (pc_t), probe->dtpr_aframes, 4673 DTRACE_ANCHORED(probe) ? NULL : 4674 (uint32_t *)arg0); 4675 4676 continue; 4677 4678 case DTRACEACT_JSTACK: 4679 case DTRACEACT_USTACK: 4680 if (!dtrace_priv_proc(state)) 4681 continue; 4682 4683 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 4684 curproc->p_dtrace_helpers != NULL) { 4685 /* 4686 * This is the slow path -- we have 4687 * allocated string space, and we're 4688 * getting the stack of a process that 4689 * has helpers. Call into a separate 4690 * routine to perform this processing. 4691 */ 4692 dtrace_action_ustack(&mstate, state, 4693 (uint64_t *)(tomax + valoffs), 4694 rec->dtrd_arg); 4695 continue; 4696 } 4697 4698 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4699 dtrace_getupcstack((uint64_t *) 4700 (tomax + valoffs), 4701 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 4702 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4703 continue; 4704 4705 default: 4706 break; 4707 } 4708 4709 dp = act->dta_difo; 4710 ASSERT(dp != NULL); 4711 4712 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 4713 4714 if (*flags & CPU_DTRACE_ERROR) 4715 continue; 4716 4717 switch (act->dta_kind) { 4718 case DTRACEACT_SPECULATE: 4719 ASSERT(buf == &state->dts_buffer[cpuid]); 4720 buf = dtrace_speculation_buffer(state, 4721 cpuid, val); 4722 4723 if (buf == NULL) { 4724 *flags |= CPU_DTRACE_DROP; 4725 continue; 4726 } 4727 4728 offs = dtrace_buffer_reserve(buf, 4729 ecb->dte_needed, ecb->dte_alignment, 4730 state, NULL); 4731 4732 if (offs < 0) { 4733 *flags |= CPU_DTRACE_DROP; 4734 continue; 4735 } 4736 4737 tomax = buf->dtb_tomax; 4738 ASSERT(tomax != NULL); 4739 4740 if (ecb->dte_size != 0) 4741 DTRACE_STORE(uint32_t, tomax, offs, 4742 ecb->dte_epid); 4743 continue; 4744 4745 case DTRACEACT_CHILL: 4746 if (dtrace_priv_kernel_destructive(state)) 4747 dtrace_action_chill(&mstate, val); 4748 continue; 4749 4750 case DTRACEACT_RAISE: 4751 if (dtrace_priv_proc_destructive(state)) 4752 dtrace_action_raise(val); 4753 continue; 4754 4755 case DTRACEACT_COMMIT: 4756 ASSERT(!committed); 4757 4758 /* 4759 * We need to commit our buffer state. 
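 * That is, we advance the current (possibly speculative)
 * buffer's offset past this record, point buf back at the
 * principal buffer, and commit the speculation identified
 * by the DIF result.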
4760 */ 4761 if (ecb->dte_size) 4762 buf->dtb_offset = offs + ecb->dte_size; 4763 buf = &state->dts_buffer[cpuid]; 4764 dtrace_speculation_commit(state, cpuid, val); 4765 committed = 1; 4766 continue; 4767 4768 case DTRACEACT_DISCARD: 4769 dtrace_speculation_discard(state, cpuid, val); 4770 continue; 4771 4772 case DTRACEACT_DIFEXPR: 4773 case DTRACEACT_LIBACT: 4774 case DTRACEACT_PRINTF: 4775 case DTRACEACT_PRINTA: 4776 case DTRACEACT_SYSTEM: 4777 case DTRACEACT_FREOPEN: 4778 break; 4779 4780 case DTRACEACT_EXIT: { 4781 /* 4782 * For the exit action, we are going to attempt 4783 * to atomically set our activity to be 4784 * draining. If this fails (either because 4785 * another CPU has beat us to the exit action, 4786 * or because our current activity is something 4787 * other than ACTIVE or WARMUP), we will 4788 * continue. This assures that the exit action 4789 * can be successfully recorded at most once 4790 * when we're in the ACTIVE state. If we're 4791 * encountering the exit() action while in 4792 * COOLDOWN, however, we want to honor the new 4793 * status code. (We know that we're the only 4794 * thread in COOLDOWN, so there is no race.) 4795 */ 4796 void *activity = &state->dts_activity; 4797 dtrace_activity_t current = state->dts_activity; 4798 4799 if (current == DTRACE_ACTIVITY_COOLDOWN) 4800 break; 4801 4802 if (current != DTRACE_ACTIVITY_WARMUP) 4803 current = DTRACE_ACTIVITY_ACTIVE; 4804 4805 if (dtrace_cas32(activity, current, 4806 DTRACE_ACTIVITY_DRAINING) != current) { 4807 *flags |= CPU_DTRACE_DROP; 4808 continue; 4809 } 4810 4811 break; 4812 } 4813 4814 default: 4815 ASSERT(0); 4816 } 4817 4818 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 4819 uintptr_t end = valoffs + size; 4820 4821 /* 4822 * If this is a string, we're going to only 4823 * load until we find the zero byte -- after 4824 * which we'll store zero bytes. 4825 */ 4826 if (dp->dtdo_rtype.dtdt_kind == 4827 DIF_TYPE_STRING) { 4828 char c = '\0' + 1; 4829 size_t s; 4830 4831 for (s = 0; s < size; s++) { 4832 if (c != '\0') 4833 c = dtrace_load8(val++); 4834 4835 DTRACE_STORE(uint8_t, tomax, 4836 valoffs++, c); 4837 } 4838 4839 continue; 4840 } 4841 4842 while (valoffs < end) { 4843 DTRACE_STORE(uint8_t, tomax, valoffs++, 4844 dtrace_load8(val++)); 4845 } 4846 4847 continue; 4848 } 4849 4850 switch (size) { 4851 case 0: 4852 break; 4853 4854 case sizeof (uint8_t): 4855 DTRACE_STORE(uint8_t, tomax, valoffs, val); 4856 break; 4857 case sizeof (uint16_t): 4858 DTRACE_STORE(uint16_t, tomax, valoffs, val); 4859 break; 4860 case sizeof (uint32_t): 4861 DTRACE_STORE(uint32_t, tomax, valoffs, val); 4862 break; 4863 case sizeof (uint64_t): 4864 DTRACE_STORE(uint64_t, tomax, valoffs, val); 4865 break; 4866 default: 4867 /* 4868 * Any other size should have been returned by 4869 * reference, not by value. 4870 */ 4871 ASSERT(0); 4872 break; 4873 } 4874 } 4875 4876 if (*flags & CPU_DTRACE_DROP) 4877 continue; 4878 4879 if (*flags & CPU_DTRACE_FAULT) { 4880 int ndx; 4881 dtrace_action_t *err; 4882 4883 buf->dtb_errors++; 4884 4885 if (probe->dtpr_id == dtrace_probeid_error) { 4886 /* 4887 * There's nothing we can do -- we had an 4888 * error on the error probe. 4889 */ 4890 dtrace_double_errors++; 4891 continue; 4892 } 4893 4894 if (vtime) { 4895 /* 4896 * Before recursing on dtrace_probe(), we 4897 * need to explicitly clear out our start 4898 * time to prevent it from being accumulated 4899 * into t_dtrace_vtime. 
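 * (It is reset to the then-current time at the bottom of
 * this function, on the way out.)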
4900 */ 4901 curthread->t_dtrace_start = 0; 4902 } 4903 4904 /* 4905 * Iterate over the actions to figure out which action 4906 * we were processing when we experienced the error. 4907 * Note that act points _past_ the faulting action; if 4908 * act is ecb->dte_action, the fault was in the 4909 * predicate; if it's ecb->dte_action->dta_next it's 4910 * in action #1, and so on. 4911 */ 4912 for (err = ecb->dte_action, ndx = 0; 4913 err != act; err = err->dta_next, ndx++) 4914 continue; 4915 4916 dtrace_probe_error(state, ecb->dte_epid, ndx, 4917 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 4918 mstate.dtms_fltoffs : -1, 4919 (*flags & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : 4920 (*flags & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : 4921 (*flags & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : 4922 (*flags & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : 4923 (*flags & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : 4924 (*flags & CPU_DTRACE_TUPOFLOW) ? 4925 DTRACEFLT_TUPOFLOW : 4926 (*flags & CPU_DTRACE_BADALIGN) ? 4927 DTRACEFLT_BADALIGN : 4928 (*flags & CPU_DTRACE_NOSCRATCH) ? 4929 DTRACEFLT_NOSCRATCH : DTRACEFLT_UNKNOWN, 4930 cpu_core[cpuid].cpuc_dtrace_illval); 4931 4932 continue; 4933 } 4934 4935 if (!committed) 4936 buf->dtb_offset = offs + ecb->dte_size; 4937 } 4938 4939 if (vtime) 4940 curthread->t_dtrace_start = dtrace_gethrtime(); 4941 4942 dtrace_interrupt_enable(cookie); 4943 } 4944 4945 /* 4946 * DTrace Probe Hashing Functions 4947 * 4948 * The functions in this section (and indeed, the functions in remaining 4949 * sections) are not _called_ from probe context. (Any exceptions to this are 4950 * marked with a "Note:".) Rather, they are called from elsewhere in the 4951 * DTrace framework to look up probes in, add probes to, and remove probes from 4952 * the DTrace probe hashes. (Each probe is hashed by each element of the 4953 * probe tuple -- allowing for fast lookups, regardless of what was 4954 * specified.)
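 * For example, the probe fbt:genunix:kmem_alloc:entry is hashed into
 * dtrace_bymod under "genunix", into dtrace_byfunc under "kmem_alloc",
 * and into dtrace_byname under "entry"; a lookup that specifies only a
 * function name need only search dtrace_byfunc.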
4955 */ 4956 static uint_t 4957 dtrace_hash_str(char *p) 4958 { 4959 unsigned int g; 4960 uint_t hval = 0; 4961 4962 while (*p) { 4963 hval = (hval << 4) + *p++; 4964 if ((g = (hval & 0xf0000000)) != 0) 4965 hval ^= g >> 24; 4966 hval &= ~g; 4967 } 4968 return (hval); 4969 } 4970 4971 static dtrace_hash_t * 4972 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 4973 { 4974 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 4975 4976 hash->dth_stroffs = stroffs; 4977 hash->dth_nextoffs = nextoffs; 4978 hash->dth_prevoffs = prevoffs; 4979 4980 hash->dth_size = 1; 4981 hash->dth_mask = hash->dth_size - 1; 4982 4983 hash->dth_tab = kmem_zalloc(hash->dth_size * 4984 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 4985 4986 return (hash); 4987 } 4988 4989 static void 4990 dtrace_hash_destroy(dtrace_hash_t *hash) 4991 { 4992 #ifdef DEBUG 4993 int i; 4994 4995 for (i = 0; i < hash->dth_size; i++) 4996 ASSERT(hash->dth_tab[i] == NULL); 4997 #endif 4998 4999 kmem_free(hash->dth_tab, 5000 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5001 kmem_free(hash, sizeof (dtrace_hash_t)); 5002 } 5003 5004 static void 5005 dtrace_hash_resize(dtrace_hash_t *hash) 5006 { 5007 int size = hash->dth_size, i, ndx; 5008 int new_size = hash->dth_size << 1; 5009 int new_mask = new_size - 1; 5010 dtrace_hashbucket_t **new_tab, *bucket, *next; 5011 5012 ASSERT((new_size & new_mask) == 0); 5013 5014 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5015 5016 for (i = 0; i < size; i++) { 5017 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5018 dtrace_probe_t *probe = bucket->dthb_chain; 5019 5020 ASSERT(probe != NULL); 5021 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5022 5023 next = bucket->dthb_next; 5024 bucket->dthb_next = new_tab[ndx]; 5025 new_tab[ndx] = bucket; 5026 } 5027 } 5028 5029 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5030 hash->dth_tab = new_tab; 5031 hash->dth_size = new_size; 5032 hash->dth_mask = new_mask; 5033 } 5034 5035 static void 5036 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5037 { 5038 int hashval = DTRACE_HASHSTR(hash, new); 5039 int ndx = hashval & hash->dth_mask; 5040 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5041 dtrace_probe_t **nextp, **prevp; 5042 5043 for (; bucket != NULL; bucket = bucket->dthb_next) { 5044 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5045 goto add; 5046 } 5047 5048 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5049 dtrace_hash_resize(hash); 5050 dtrace_hash_add(hash, new); 5051 return; 5052 } 5053 5054 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5055 bucket->dthb_next = hash->dth_tab[ndx]; 5056 hash->dth_tab[ndx] = bucket; 5057 hash->dth_nbuckets++; 5058 5059 add: 5060 nextp = DTRACE_HASHNEXT(hash, new); 5061 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5062 *nextp = bucket->dthb_chain; 5063 5064 if (bucket->dthb_chain != NULL) { 5065 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5066 ASSERT(*prevp == NULL); 5067 *prevp = new; 5068 } 5069 5070 bucket->dthb_chain = new; 5071 bucket->dthb_len++; 5072 } 5073 5074 static dtrace_probe_t * 5075 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5076 { 5077 int hashval = DTRACE_HASHSTR(hash, template); 5078 int ndx = hashval & hash->dth_mask; 5079 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5080 5081 for (; bucket != NULL; bucket = bucket->dthb_next) { 5082 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5083 return 
(bucket->dthb_chain); 5084 } 5085 5086 return (NULL); 5087 } 5088 5089 static int 5090 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5091 { 5092 int hashval = DTRACE_HASHSTR(hash, template); 5093 int ndx = hashval & hash->dth_mask; 5094 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5095 5096 for (; bucket != NULL; bucket = bucket->dthb_next) { 5097 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5098 return (bucket->dthb_len); 5099 } 5100 5101 return (0); 5102 } 5103 5104 static void 5105 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5106 { 5107 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5108 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5109 5110 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5111 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5112 5113 /* 5114 * Find the bucket that we're removing this probe from. 5115 */ 5116 for (; bucket != NULL; bucket = bucket->dthb_next) { 5117 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5118 break; 5119 } 5120 5121 ASSERT(bucket != NULL); 5122 5123 if (*prevp == NULL) { 5124 if (*nextp == NULL) { 5125 /* 5126 * The removed probe was the only probe on this 5127 * bucket; we need to remove the bucket. 5128 */ 5129 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5130 5131 ASSERT(bucket->dthb_chain == probe); 5132 ASSERT(b != NULL); 5133 5134 if (b == bucket) { 5135 hash->dth_tab[ndx] = bucket->dthb_next; 5136 } else { 5137 while (b->dthb_next != bucket) 5138 b = b->dthb_next; 5139 b->dthb_next = bucket->dthb_next; 5140 } 5141 5142 ASSERT(hash->dth_nbuckets > 0); 5143 hash->dth_nbuckets--; 5144 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5145 return; 5146 } 5147 5148 bucket->dthb_chain = *nextp; 5149 } else { 5150 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5151 } 5152 5153 if (*nextp != NULL) 5154 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5155 } 5156 5157 /* 5158 * DTrace Utility Functions 5159 * 5160 * These are random utility functions that are _not_ called from probe context. 5161 */ 5162 static int 5163 dtrace_badattr(const dtrace_attribute_t *a) 5164 { 5165 return (a->dtat_name > DTRACE_STABILITY_MAX || 5166 a->dtat_data > DTRACE_STABILITY_MAX || 5167 a->dtat_class > DTRACE_CLASS_MAX); 5168 } 5169 5170 /* 5171 * Return a copy of a string. If the specified string is NULL, 5172 * this function returns a zero-length string. 5173 */ 5174 static char * 5175 dtrace_strdup(const char *str) 5176 { 5177 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 5178 5179 if (str != NULL) 5180 (void) strcpy(new, str); 5181 5182 return (new); 5183 } 5184 5185 #define DTRACE_ISALPHA(c) \ 5186 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5187 5188 static int 5189 dtrace_badname(const char *s) 5190 { 5191 char c; 5192 5193 if (s == NULL || (c = *s++) == '\0') 5194 return (0); 5195 5196 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5197 return (1); 5198 5199 while ((c = *s++) != '\0') { 5200 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 5201 c != '-' && c != '_' && c != '.'
&& c != '`') 5202 return (1); 5203 } 5204 5205 return (0); 5206 } 5207 5208 static void 5209 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp) 5210 { 5211 uint32_t priv; 5212 5213 *uidp = crgetuid(cr); 5214 if (PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5215 priv = DTRACE_PRIV_ALL; 5216 } else { 5217 priv = 0; 5218 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 5219 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 5220 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 5221 priv |= DTRACE_PRIV_USER; 5222 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 5223 priv |= DTRACE_PRIV_PROC; 5224 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 5225 priv |= DTRACE_PRIV_OWNER; 5226 } 5227 5228 *privp = priv; 5229 } 5230 5231 #ifdef DTRACE_ERRDEBUG 5232 static void 5233 dtrace_errdebug(const char *str) 5234 { 5235 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 5236 int occupied = 0; 5237 5238 mutex_enter(&dtrace_errlock); 5239 dtrace_errlast = str; 5240 dtrace_errthread = curthread; 5241 5242 while (occupied++ < DTRACE_ERRHASHSZ) { 5243 if (dtrace_errhash[hval].dter_msg == str) { 5244 dtrace_errhash[hval].dter_count++; 5245 goto out; 5246 } 5247 5248 if (dtrace_errhash[hval].dter_msg != NULL) { 5249 hval = (hval + 1) % DTRACE_ERRHASHSZ; 5250 continue; 5251 } 5252 5253 dtrace_errhash[hval].dter_msg = str; 5254 dtrace_errhash[hval].dter_count = 1; 5255 goto out; 5256 } 5257 5258 panic("dtrace: undersized error hash"); 5259 out: 5260 mutex_exit(&dtrace_errlock); 5261 } 5262 #endif 5263 5264 /* 5265 * DTrace Matching Functions 5266 * 5267 * These functions are used to match groups of probes, given some elements of 5268 * a probe tuple, or some globbed expressions for elements of a probe tuple. 5269 */ 5270 static int 5271 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid) 5272 { 5273 if (priv != DTRACE_PRIV_ALL) { 5274 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 5275 uint32_t match = priv & ppriv; 5276 5277 /* 5278 * No PRIV_DTRACE_* privileges... 5279 */ 5280 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 5281 DTRACE_PRIV_KERNEL)) == 0) 5282 return (0); 5283 5284 /* 5285 * No matching bits, but there were bits to match... 5286 */ 5287 if (match == 0 && ppriv != 0) 5288 return (0); 5289 5290 /* 5291 * Need to have permissions to the process, but don't... 5292 */ 5293 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 5294 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) 5295 return (0); 5296 } 5297 5298 return (1); 5299 } 5300 5301 /* 5302 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 5303 * consists of input pattern strings and an ops-vector to evaluate them. 5304 * This function returns >0 for match, 0 for no match, and <0 for error. 
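 * As an illustrative example, the key compiled from the description
 * syscall::read:entry uses dtrace_match_string() for "syscall", "read"
 * and "entry" and dtrace_match_nul() for the empty module field; all
 * four must succeed for the probe to match.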
5305 */ 5306 static int 5307 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 5308 uint32_t priv, uid_t uid) 5309 { 5310 dtrace_provider_t *pvp = prp->dtpr_provider; 5311 int rv; 5312 5313 if (pvp->dtpv_defunct) 5314 return (0); 5315 5316 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 5317 return (rv); 5318 5319 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 5320 return (rv); 5321 5322 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 5323 return (rv); 5324 5325 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 5326 return (rv); 5327 5328 if (dtrace_match_priv(prp, priv, uid) == 0) 5329 return (0); 5330 5331 return (rv); 5332 } 5333 5334 /* 5335 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 5336 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 5337 * libc's version, the kernel version only applies to 8-bit ASCII strings. 5338 * In addition, all of the recursion cases except for '*' matching have been 5339 * unwound. For '*', we still implement recursive evaluation, but a depth 5340 * counter is maintained and matching is aborted if we recurse too deep. 5341 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 5342 */ 5343 static int 5344 dtrace_match_glob(const char *s, const char *p, int depth) 5345 { 5346 const char *olds; 5347 char s1, c; 5348 int gs; 5349 5350 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 5351 return (-1); 5352 5353 if (s == NULL) 5354 s = ""; /* treat NULL as empty string */ 5355 5356 top: 5357 olds = s; 5358 s1 = *s++; 5359 5360 if (p == NULL) 5361 return (0); 5362 5363 if ((c = *p++) == '\0') 5364 return (s1 == '\0'); 5365 5366 switch (c) { 5367 case '[': { 5368 int ok = 0, notflag = 0; 5369 char lc = '\0'; 5370 5371 if (s1 == '\0') 5372 return (0); 5373 5374 if (*p == '!') { 5375 notflag = 1; 5376 p++; 5377 } 5378 5379 if ((c = *p++) == '\0') 5380 return (0); 5381 5382 do { 5383 if (c == '-' && lc != '\0' && *p != ']') { 5384 if ((c = *p++) == '\0') 5385 return (0); 5386 if (c == '\\' && (c = *p++) == '\0') 5387 return (0); 5388 5389 if (notflag) { 5390 if (s1 < lc || s1 > c) 5391 ok++; 5392 else 5393 return (0); 5394 } else if (lc <= s1 && s1 <= c) 5395 ok++; 5396 5397 } else if (c == '\\' && (c = *p++) == '\0') 5398 return (0); 5399 5400 lc = c; /* save left-hand 'c' for next iteration */ 5401 5402 if (notflag) { 5403 if (s1 != c) 5404 ok++; 5405 else 5406 return (0); 5407 } else if (s1 == c) 5408 ok++; 5409 5410 if ((c = *p++) == '\0') 5411 return (0); 5412 5413 } while (c != ']'); 5414 5415 if (ok) 5416 goto top; 5417 5418 return (0); 5419 } 5420 5421 case '\\': 5422 if ((c = *p++) == '\0') 5423 return (0); 5424 /*FALLTHRU*/ 5425 5426 default: 5427 if (c != s1) 5428 return (0); 5429 /*FALLTHRU*/ 5430 5431 case '?': 5432 if (s1 != '\0') 5433 goto top; 5434 return (0); 5435 5436 case '*': 5437 while (*p == '*') 5438 p++; /* consecutive *'s are identical to a single one */ 5439 5440 if (*p == '\0') 5441 return (1); 5442 5443 for (s = olds; *s != '\0'; s++) { 5444 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 5445 return (gs); 5446 } 5447 5448 return (0); 5449 } 5450 } 5451 5452 /*ARGSUSED*/ 5453 static int 5454 dtrace_match_string(const char *s, const char *p, int depth) 5455 { 5456 return (s != NULL && strcmp(s, p) == 0); 5457 } 5458 5459 /*ARGSUSED*/ 5460 static int 5461 dtrace_match_nul(const char *s, const char *p, int depth) 5462 { 5463 return (1); /* always match the empty 
pattern */ 5464 } 5465 5466 /*ARGSUSED*/ 5467 static int 5468 dtrace_match_nonzero(const char *s, const char *p, int depth) 5469 { 5470 return (s != NULL && s[0] != '\0'); 5471 } 5472 5473 static int 5474 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 5475 int (*matched)(dtrace_probe_t *, void *), void *arg) 5476 { 5477 dtrace_probe_t template, *probe; 5478 dtrace_hash_t *hash = NULL; 5479 int len, best = INT_MAX, nmatched = 0; 5480 dtrace_id_t i; 5481 5482 ASSERT(MUTEX_HELD(&dtrace_lock)); 5483 5484 /* 5485 * If the probe ID is specified in the key, just lookup by ID and 5486 * invoke the match callback once if a matching probe is found. 5487 */ 5488 if (pkp->dtpk_id != DTRACE_IDNONE) { 5489 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 5490 dtrace_match_probe(probe, pkp, priv, uid) > 0) { 5491 (void) (*matched)(probe, arg); 5492 nmatched++; 5493 } 5494 return (nmatched); 5495 } 5496 5497 template.dtpr_mod = (char *)pkp->dtpk_mod; 5498 template.dtpr_func = (char *)pkp->dtpk_func; 5499 template.dtpr_name = (char *)pkp->dtpk_name; 5500 5501 /* 5502 * We want to find the most distinct of the module name, function 5503 * name, and name. So for each one that is not a glob pattern or 5504 * empty string, we perform a lookup in the corresponding hash and 5505 * use the hash table with the fewest collisions to do our search. 5506 */ 5507 if (pkp->dtpk_mmatch == &dtrace_match_string && 5508 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 5509 best = len; 5510 hash = dtrace_bymod; 5511 } 5512 5513 if (pkp->dtpk_fmatch == &dtrace_match_string && 5514 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 5515 best = len; 5516 hash = dtrace_byfunc; 5517 } 5518 5519 if (pkp->dtpk_nmatch == &dtrace_match_string && 5520 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 5521 best = len; 5522 hash = dtrace_byname; 5523 } 5524 5525 /* 5526 * If we did not select a hash table, iterate over every probe and 5527 * invoke our callback for each one that matches our input probe key. 5528 */ 5529 if (hash == NULL) { 5530 for (i = 0; i < dtrace_nprobes; i++) { 5531 if ((probe = dtrace_probes[i]) == NULL || 5532 dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5533 continue; 5534 5535 nmatched++; 5536 5537 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5538 break; 5539 } 5540 5541 return (nmatched); 5542 } 5543 5544 /* 5545 * If we selected a hash table, iterate over each probe of the same key 5546 * name and invoke the callback for every probe that matches the other 5547 * attributes of our input probe key. 5548 */ 5549 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 5550 probe = *(DTRACE_HASHNEXT(hash, probe))) { 5551 5552 if (dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5553 continue; 5554 5555 nmatched++; 5556 5557 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5558 break; 5559 } 5560 5561 return (nmatched); 5562 } 5563 5564 /* 5565 * Return the function pointer dtrace_probecmp() should use to compare the 5566 * specified pattern with a string. For NULL or empty patterns, we select 5567 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 5568 * For non-empty non-glob strings, we use dtrace_match_string(). 5569 */ 5570 static dtrace_probekey_f * 5571 dtrace_probekey_func(const char *p) 5572 { 5573 char c; 5574 5575 if (p == NULL || *p == '\0') 5576 return (&dtrace_match_nul); 5577 5578 while ((c = *p++) != '\0') { 5579 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 5580 return (&dtrace_match_glob); 5581 } 5582 5583 return (&dtrace_match_string); 5584 } 5585 5586 /* 5587 * Build a probe comparison key for use with dtrace_match_probe() from the 5588 * given probe description. By convention, a null key only matches anchored 5589 * probes: if each field is the empty string, reset dtpk_fmatch to 5590 * dtrace_match_nonzero(). 5591 */ 5592 static void 5593 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 5594 { 5595 pkp->dtpk_prov = pdp->dtpd_provider; 5596 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 5597 5598 pkp->dtpk_mod = pdp->dtpd_mod; 5599 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 5600 5601 pkp->dtpk_func = pdp->dtpd_func; 5602 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 5603 5604 pkp->dtpk_name = pdp->dtpd_name; 5605 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 5606 5607 pkp->dtpk_id = pdp->dtpd_id; 5608 5609 if (pkp->dtpk_id == DTRACE_IDNONE && 5610 pkp->dtpk_pmatch == &dtrace_match_nul && 5611 pkp->dtpk_mmatch == &dtrace_match_nul && 5612 pkp->dtpk_fmatch == &dtrace_match_nul && 5613 pkp->dtpk_nmatch == &dtrace_match_nul) 5614 pkp->dtpk_fmatch = &dtrace_match_nonzero; 5615 } 5616 5617 /* 5618 * DTrace Provider-to-Framework API Functions 5619 * 5620 * These functions implement much of the Provider-to-Framework API, as 5621 * described in <sys/dtrace.h>. The parts of the API not in this section are 5622 * the functions in the API for probe management (found below), and 5623 * dtrace_probe() itself (found above). 5624 */ 5625 5626 /* 5627 * Register the calling provider with the DTrace framework. This should 5628 * generally be called by DTrace providers in their attach(9E) entry point. 5629 */ 5630 int 5631 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 5632 uid_t uid, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 5633 { 5634 dtrace_provider_t *provider; 5635 5636 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 5637 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5638 "arguments", name ? 
name : "<NULL>"); 5639 return (EINVAL); 5640 } 5641 5642 if (name[0] == '\0' || dtrace_badname(name)) { 5643 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5644 "provider name", name); 5645 return (EINVAL); 5646 } 5647 5648 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 5649 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 5650 pops->dtps_destroy == NULL || 5651 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 5652 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5653 "provider ops", name); 5654 return (EINVAL); 5655 } 5656 5657 if (dtrace_badattr(&pap->dtpa_provider) || 5658 dtrace_badattr(&pap->dtpa_mod) || 5659 dtrace_badattr(&pap->dtpa_func) || 5660 dtrace_badattr(&pap->dtpa_name) || 5661 dtrace_badattr(&pap->dtpa_args)) { 5662 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5663 "provider attributes", name); 5664 return (EINVAL); 5665 } 5666 5667 if (priv & ~DTRACE_PRIV_ALL) { 5668 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5669 "privilege attributes", name); 5670 return (EINVAL); 5671 } 5672 5673 if ((priv & DTRACE_PRIV_KERNEL) && 5674 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 5675 pops->dtps_usermode == NULL) { 5676 cmn_err(CE_WARN, "failed to register provider '%s': need " 5677 "dtps_usermode() op for given privilege attributes", name); 5678 return (EINVAL); 5679 } 5680 5681 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 5682 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 5683 (void) strcpy(provider->dtpv_name, name); 5684 5685 provider->dtpv_attr = *pap; 5686 provider->dtpv_priv.dtpp_flags = priv; 5687 provider->dtpv_priv.dtpp_uid = uid; 5688 provider->dtpv_pops = *pops; 5689 5690 if (pops->dtps_provide == NULL) { 5691 ASSERT(pops->dtps_provide_module != NULL); 5692 provider->dtpv_pops.dtps_provide = 5693 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 5694 } 5695 5696 if (pops->dtps_provide_module == NULL) { 5697 ASSERT(pops->dtps_provide != NULL); 5698 provider->dtpv_pops.dtps_provide_module = 5699 (void (*)(void *, struct modctl *))dtrace_nullop; 5700 } 5701 5702 if (pops->dtps_suspend == NULL) { 5703 ASSERT(pops->dtps_resume == NULL); 5704 provider->dtpv_pops.dtps_suspend = 5705 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5706 provider->dtpv_pops.dtps_resume = 5707 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5708 } 5709 5710 provider->dtpv_arg = arg; 5711 *idp = (dtrace_provider_id_t)provider; 5712 5713 if (pops == &dtrace_provider_ops) { 5714 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5715 ASSERT(MUTEX_HELD(&dtrace_lock)); 5716 ASSERT(dtrace_anon.dta_enabling == NULL); 5717 5718 /* 5719 * We make sure that the DTrace provider is at the head of 5720 * the provider chain. 5721 */ 5722 provider->dtpv_next = dtrace_provider; 5723 dtrace_provider = provider; 5724 return (0); 5725 } 5726 5727 mutex_enter(&dtrace_provider_lock); 5728 mutex_enter(&dtrace_lock); 5729 5730 /* 5731 * If there is at least one provider registered, we'll add this 5732 * provider after the first provider. 5733 */ 5734 if (dtrace_provider != NULL) { 5735 provider->dtpv_next = dtrace_provider->dtpv_next; 5736 dtrace_provider->dtpv_next = provider; 5737 } else { 5738 dtrace_provider = provider; 5739 } 5740 5741 if (dtrace_retained != NULL) { 5742 dtrace_enabling_provide(provider); 5743 5744 /* 5745 * Now we need to call dtrace_enabling_matchall() -- which 5746 * will acquire cpu_lock and dtrace_lock. 
We therefore need 5747 * to drop all of our locks before calling into it... 5748 */ 5749 mutex_exit(&dtrace_lock); 5750 mutex_exit(&dtrace_provider_lock); 5751 dtrace_enabling_matchall(); 5752 5753 return (0); 5754 } 5755 5756 mutex_exit(&dtrace_lock); 5757 mutex_exit(&dtrace_provider_lock); 5758 5759 return (0); 5760 } 5761 5762 /* 5763 * Unregister the specified provider from the DTrace framework. This should 5764 * generally be called by DTrace providers in their detach(9E) entry point. 5765 */ 5766 int 5767 dtrace_unregister(dtrace_provider_id_t id) 5768 { 5769 dtrace_provider_t *old = (dtrace_provider_t *)id; 5770 dtrace_provider_t *prev = NULL; 5771 int i, self = 0; 5772 dtrace_probe_t *probe, *first = NULL; 5773 5774 if (old->dtpv_pops.dtps_enable == 5775 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 5776 /* 5777 * If DTrace itself is the provider, we're called with locks 5778 * already held. 5779 */ 5780 ASSERT(old == dtrace_provider); 5781 ASSERT(dtrace_devi != NULL); 5782 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5783 ASSERT(MUTEX_HELD(&dtrace_lock)); 5784 self = 1; 5785 5786 if (dtrace_provider->dtpv_next != NULL) { 5787 /* 5788 * There's another provider here; return failure. 5789 */ 5790 return (EBUSY); 5791 } 5792 } else { 5793 mutex_enter(&dtrace_provider_lock); 5794 mutex_enter(&mod_lock); 5795 mutex_enter(&dtrace_lock); 5796 } 5797 5798 /* 5799 * If anyone has /dev/dtrace open, or if there are anonymous enabled 5800 * probes, we refuse to let providers slither away, unless this 5801 * provider has already been explicitly invalidated. 5802 */ 5803 if (!old->dtpv_defunct && 5804 (dtrace_opens || (dtrace_anon.dta_state != NULL && 5805 dtrace_anon.dta_state->dts_necbs > 0))) { 5806 if (!self) { 5807 mutex_exit(&dtrace_lock); 5808 mutex_exit(&mod_lock); 5809 mutex_exit(&dtrace_provider_lock); 5810 } 5811 return (EBUSY); 5812 } 5813 5814 /* 5815 * Attempt to destroy the probes associated with this provider. 5816 */ 5817 for (i = 0; i < dtrace_nprobes; i++) { 5818 if ((probe = dtrace_probes[i]) == NULL) 5819 continue; 5820 5821 if (probe->dtpr_provider != old) 5822 continue; 5823 5824 if (probe->dtpr_ecb == NULL) 5825 continue; 5826 5827 /* 5828 * We have at least one ECB; we can't remove this provider. 5829 */ 5830 if (!self) { 5831 mutex_exit(&dtrace_lock); 5832 mutex_exit(&mod_lock); 5833 mutex_exit(&dtrace_provider_lock); 5834 } 5835 return (EBUSY); 5836 } 5837 5838 /* 5839 * All of the probes for this provider are disabled; we can safely 5840 * remove all of them from their hash chains and from the probe array. 5841 */ 5842 for (i = 0; i < dtrace_nprobes; i++) { 5843 if ((probe = dtrace_probes[i]) == NULL) 5844 continue; 5845 5846 if (probe->dtpr_provider != old) 5847 continue; 5848 5849 dtrace_probes[i] = NULL; 5850 5851 dtrace_hash_remove(dtrace_bymod, probe); 5852 dtrace_hash_remove(dtrace_byfunc, probe); 5853 dtrace_hash_remove(dtrace_byname, probe); 5854 5855 if (first == NULL) { 5856 first = probe; 5857 probe->dtpr_nextmod = NULL; 5858 } else { 5859 probe->dtpr_nextmod = first; 5860 first = probe; 5861 } 5862 } 5863 5864 /* 5865 * The provider's probes have been removed from the hash chains and 5866 * from the probe array. Now issue a dtrace_sync() to be sure that 5867 * everyone has cleared out from any probe array processing. 
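 * (dtrace_sync() does not return until every CPU has been observed
 * outside of probe context; once it returns, no CPU can still be
 * referencing the removed probes.)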
5868 */ 5869 dtrace_sync(); 5870 5871 for (probe = first; probe != NULL; probe = first) { 5872 first = probe->dtpr_nextmod; 5873 5874 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 5875 probe->dtpr_arg); 5876 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 5877 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 5878 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 5879 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 5880 kmem_free(probe, sizeof (dtrace_probe_t)); 5881 } 5882 5883 if ((prev = dtrace_provider) == old) { 5884 ASSERT(self || dtrace_devi == NULL); 5885 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 5886 dtrace_provider = old->dtpv_next; 5887 } else { 5888 while (prev != NULL && prev->dtpv_next != old) 5889 prev = prev->dtpv_next; 5890 5891 if (prev == NULL) { 5892 panic("attempt to unregister non-existent " 5893 "dtrace provider %p\n", (void *)id); 5894 } 5895 5896 prev->dtpv_next = old->dtpv_next; 5897 } 5898 5899 if (!self) { 5900 mutex_exit(&dtrace_lock); 5901 mutex_exit(&mod_lock); 5902 mutex_exit(&dtrace_provider_lock); 5903 } 5904 5905 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 5906 kmem_free(old, sizeof (dtrace_provider_t)); 5907 5908 return (0); 5909 } 5910 5911 /* 5912 * Invalidate the specified provider. All subsequent probe lookups for the 5913 * specified provider will fail, but its probes will not be removed. 5914 */ 5915 void 5916 dtrace_invalidate(dtrace_provider_id_t id) 5917 { 5918 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 5919 5920 ASSERT(pvp->dtpv_pops.dtps_enable != 5921 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 5922 5923 mutex_enter(&dtrace_provider_lock); 5924 mutex_enter(&dtrace_lock); 5925 5926 pvp->dtpv_defunct = 1; 5927 5928 mutex_exit(&dtrace_lock); 5929 mutex_exit(&dtrace_provider_lock); 5930 } 5931 5932 /* 5933 * Indicate whether or not DTrace has attached. 5934 */ 5935 int 5936 dtrace_attached(void) 5937 { 5938 /* 5939 * dtrace_provider will be non-NULL iff the DTrace driver has 5940 * attached. (It's non-NULL because DTrace is always itself a 5941 * provider.) 5942 */ 5943 return (dtrace_provider != NULL); 5944 } 5945 5946 /* 5947 * Remove all the unenabled probes for the given provider. This function is 5948 * not unlike dtrace_unregister(), except that it doesn't remove the provider 5949 * -- just as many of its associated probes as it can. 5950 */ 5951 int 5952 dtrace_condense(dtrace_provider_id_t id) 5953 { 5954 dtrace_provider_t *prov = (dtrace_provider_t *)id; 5955 int i; 5956 dtrace_probe_t *probe; 5957 5958 /* 5959 * Make sure this isn't the dtrace provider itself. 5960 */ 5961 ASSERT(prov->dtpv_pops.dtps_enable != 5962 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 5963 5964 mutex_enter(&dtrace_provider_lock); 5965 mutex_enter(&dtrace_lock); 5966 5967 /* 5968 * Attempt to destroy the probes associated with this provider. 
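 * Unlike the similar loop in dtrace_unregister(), probes that have an
 * ECB (that is, enabled probes) are simply skipped here rather than
 * causing the entire operation to fail.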
5969 */ 5970 for (i = 0; i < dtrace_nprobes; i++) { 5971 if ((probe = dtrace_probes[i]) == NULL) 5972 continue; 5973 5974 if (probe->dtpr_provider != prov) 5975 continue; 5976 5977 if (probe->dtpr_ecb != NULL) 5978 continue; 5979 5980 dtrace_probes[i] = NULL; 5981 5982 dtrace_hash_remove(dtrace_bymod, probe); 5983 dtrace_hash_remove(dtrace_byfunc, probe); 5984 dtrace_hash_remove(dtrace_byname, probe); 5985 5986 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 5987 probe->dtpr_arg); 5988 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 5989 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 5990 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 5991 kmem_free(probe, sizeof (dtrace_probe_t)); 5992 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 5993 } 5994 5995 mutex_exit(&dtrace_lock); 5996 mutex_exit(&dtrace_provider_lock); 5997 5998 return (0); 5999 } 6000 6001 /* 6002 * DTrace Probe Management Functions 6003 * 6004 * The functions in this section perform the DTrace probe management, 6005 * including functions to create probes, look-up probes, and call into the 6006 * providers to request that probes be provided. Some of these functions are 6007 * in the Provider-to-Framework API; these functions can be identified by the 6008 * fact that they are not declared "static". 6009 */ 6010 6011 /* 6012 * Create a probe with the specified module name, function name, and name. 6013 */ 6014 dtrace_id_t 6015 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6016 const char *func, const char *name, int aframes, void *arg) 6017 { 6018 dtrace_probe_t *probe, **probes; 6019 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6020 dtrace_id_t id; 6021 6022 if (provider == dtrace_provider) { 6023 ASSERT(MUTEX_HELD(&dtrace_lock)); 6024 } else { 6025 mutex_enter(&dtrace_lock); 6026 } 6027 6028 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6029 VM_BESTFIT | VM_SLEEP); 6030 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6031 6032 probe->dtpr_id = id; 6033 probe->dtpr_gen = dtrace_probegen++; 6034 probe->dtpr_mod = dtrace_strdup(mod); 6035 probe->dtpr_func = dtrace_strdup(func); 6036 probe->dtpr_name = dtrace_strdup(name); 6037 probe->dtpr_arg = arg; 6038 probe->dtpr_aframes = aframes; 6039 probe->dtpr_provider = provider; 6040 6041 dtrace_hash_add(dtrace_bymod, probe); 6042 dtrace_hash_add(dtrace_byfunc, probe); 6043 dtrace_hash_add(dtrace_byname, probe); 6044 6045 if (id - 1 >= dtrace_nprobes) { 6046 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6047 size_t nsize = osize << 1; 6048 6049 if (nsize == 0) { 6050 ASSERT(osize == 0); 6051 ASSERT(dtrace_probes == NULL); 6052 nsize = sizeof (dtrace_probe_t *); 6053 } 6054 6055 probes = kmem_zalloc(nsize, KM_SLEEP); 6056 6057 if (dtrace_probes == NULL) { 6058 ASSERT(osize == 0); 6059 dtrace_probes = probes; 6060 dtrace_nprobes = 1; 6061 } else { 6062 dtrace_probe_t **oprobes = dtrace_probes; 6063 6064 bcopy(oprobes, probes, osize); 6065 dtrace_membar_producer(); 6066 dtrace_probes = probes; 6067 6068 dtrace_sync(); 6069 6070 /* 6071 * All CPUs are now seeing the new probes array; we can 6072 * safely free the old array. 
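 * As a worked example, growing a table of four probes: osize is
 * 4 * sizeof (dtrace_probe_t *), nsize is twice that, the four old
 * entries are bcopy()'d into the new table, and only after
 * dtrace_sync() returns is the old table freed.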
6073 */ 6074 kmem_free(oprobes, osize); 6075 dtrace_nprobes <<= 1; 6076 } 6077 6078 ASSERT(id - 1 < dtrace_nprobes); 6079 } 6080 6081 ASSERT(dtrace_probes[id - 1] == NULL); 6082 dtrace_probes[id - 1] = probe; 6083 6084 if (provider != dtrace_provider) 6085 mutex_exit(&dtrace_lock); 6086 6087 return (id); 6088 } 6089 6090 static dtrace_probe_t * 6091 dtrace_probe_lookup_id(dtrace_id_t id) 6092 { 6093 ASSERT(MUTEX_HELD(&dtrace_lock)); 6094 6095 if (id == 0 || id > dtrace_nprobes) 6096 return (NULL); 6097 6098 return (dtrace_probes[id - 1]); 6099 } 6100 6101 static int 6102 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 6103 { 6104 *((dtrace_id_t *)arg) = probe->dtpr_id; 6105 6106 return (DTRACE_MATCH_DONE); 6107 } 6108 6109 /* 6110 * Look up a probe based on provider and one or more of module name, function 6111 * name and probe name. 6112 */ 6113 dtrace_id_t 6114 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 6115 const char *func, const char *name) 6116 { 6117 dtrace_probekey_t pkey; 6118 dtrace_id_t id; 6119 int match; 6120 6121 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 6122 pkey.dtpk_pmatch = &dtrace_match_string; 6123 pkey.dtpk_mod = mod; 6124 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 6125 pkey.dtpk_func = func; 6126 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 6127 pkey.dtpk_name = name; 6128 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 6129 pkey.dtpk_id = DTRACE_IDNONE; 6130 6131 mutex_enter(&dtrace_lock); 6132 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 6133 dtrace_probe_lookup_match, &id); 6134 mutex_exit(&dtrace_lock); 6135 6136 ASSERT(match == 1 || match == 0); 6137 return (match ? id : 0); 6138 } 6139 6140 /* 6141 * Returns the probe argument associated with the specified probe. 6142 */ 6143 void * 6144 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 6145 { 6146 dtrace_probe_t *probe; 6147 void *rval = NULL; 6148 6149 mutex_enter(&dtrace_lock); 6150 6151 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 6152 probe->dtpr_provider == (dtrace_provider_t *)id) 6153 rval = probe->dtpr_arg; 6154 6155 mutex_exit(&dtrace_lock); 6156 6157 return (rval); 6158 } 6159 6160 /* 6161 * Copy a probe into a probe description. 6162 */ 6163 static void 6164 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 6165 { 6166 bzero(pdp, sizeof (dtrace_probedesc_t)); 6167 pdp->dtpd_id = prp->dtpr_id; 6168 6169 (void) strncpy(pdp->dtpd_provider, 6170 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 6171 6172 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 6173 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 6174 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 6175 } 6176 6177 /* 6178 * Called to indicate that a probe -- or probes -- should be provided by a 6179 * specified provider. If the specified description is NULL, the provider will 6180 * be told to provide all of its probes. (This is done whenever a new 6181 * consumer comes along, or whenever a retained enabling is to be matched.) If 6182 * the specified description is non-NULL, the provider is given the 6183 * opportunity to dynamically provide the specified probe, allowing providers 6184 * to support the creation of probes on-the-fly. (So-called _autocreated_ 6185 * probes.)
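 * As a sketch only -- with xyz_id standing in for a provider's
 * registered id -- an autocreating dtps_provide entry point might
 * resemble:
 *
 *	static void
 *	xyz_provide(void *arg, const dtrace_probedesc_t *desc)
 *	{
 *		if (desc == NULL)
 *			return;
 *
 *		if (dtrace_probe_lookup(xyz_id, NULL, NULL,
 *		    desc->dtpd_name) == DTRACE_IDNONE)
 *			(void) dtrace_probe_create(xyz_id, NULL,
 *			    NULL, desc->dtpd_name, 0, NULL);
 *	}
 *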
If the provider is NULL, the operations will be applied to all 6186 * providers; if the provider is non-NULL the operations will only be applied 6187 * to the specified provider. The dtrace_provider_lock must be held, and the 6188 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6189 * will need to grab the dtrace_lock when it reenters the framework through 6190 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6191 */ 6192 static void 6193 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 6194 { 6195 struct modctl *ctl; 6196 int all = 0; 6197 6198 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6199 6200 if (prv == NULL) { 6201 all = 1; 6202 prv = dtrace_provider; 6203 } 6204 6205 do { 6206 /* 6207 * First, call the blanket provide operation. 6208 */ 6209 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 6210 6211 /* 6212 * Now call the per-module provide operation. We will grab 6213 * mod_lock to prevent the list from being modified. Note 6214 * that this also prevents the mod_busy bits from changing. 6215 * (mod_busy can only be changed with mod_lock held.) 6216 */ 6217 mutex_enter(&mod_lock); 6218 6219 ctl = &modules; 6220 do { 6221 if (ctl->mod_busy || ctl->mod_mp == NULL) 6222 continue; 6223 6224 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 6225 6226 } while ((ctl = ctl->mod_next) != &modules); 6227 6228 mutex_exit(&mod_lock); 6229 } while (all && (prv = prv->dtpv_next) != NULL); 6230 } 6231 6232 /* 6233 * Iterate over each probe, and call the Framework-to-Provider API function 6234 * denoted by offs. 6235 */ 6236 static void 6237 dtrace_probe_foreach(uintptr_t offs) 6238 { 6239 dtrace_provider_t *prov; 6240 void (*func)(void *, dtrace_id_t, void *); 6241 dtrace_probe_t *probe; 6242 dtrace_icookie_t cookie; 6243 int i; 6244 6245 /* 6246 * We disable interrupts to walk through the probe array. This is 6247 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 6248 * won't see stale data. 6249 */ 6250 cookie = dtrace_interrupt_disable(); 6251 6252 for (i = 0; i < dtrace_nprobes; i++) { 6253 if ((probe = dtrace_probes[i]) == NULL) 6254 continue; 6255 6256 if (probe->dtpr_ecb == NULL) { 6257 /* 6258 * This probe isn't enabled -- don't call the function. 6259 */ 6260 continue; 6261 } 6262 6263 prov = probe->dtpr_provider; 6264 func = *((void(**)(void *, dtrace_id_t, void *)) 6265 ((uintptr_t)&prov->dtpv_pops + offs)); 6266 6267 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 6268 } 6269 6270 dtrace_interrupt_enable(cookie); 6271 } 6272 6273 static int 6274 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 6275 { 6276 dtrace_probekey_t pkey; 6277 uint32_t priv; 6278 uid_t uid; 6279 6280 ASSERT(MUTEX_HELD(&dtrace_lock)); 6281 dtrace_ecb_create_cache = NULL; 6282 6283 if (desc == NULL) { 6284 /* 6285 * If we're passed a NULL description, we're being asked to 6286 * create an ECB with a NULL probe. 
6287 */ 6288 (void) dtrace_ecb_create_enable(NULL, enab); 6289 return (0); 6290 } 6291 6292 dtrace_probekey(desc, &pkey); 6293 dtrace_cred2priv(CRED(), &priv, &uid); 6294 6295 return (dtrace_match(&pkey, priv, uid, dtrace_ecb_create_enable, enab)); 6296 } 6297 6298 /* 6299 * DTrace Helper Provider Functions 6300 */ 6301 static void 6302 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 6303 { 6304 attr->dtat_name = DOF_ATTR_NAME(dofattr); 6305 attr->dtat_data = DOF_ATTR_DATA(dofattr); 6306 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 6307 } 6308 6309 static void 6310 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 6311 const dof_provider_t *dofprov, char *strtab) 6312 { 6313 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 6314 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 6315 dofprov->dofpv_provattr); 6316 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 6317 dofprov->dofpv_modattr); 6318 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 6319 dofprov->dofpv_funcattr); 6320 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 6321 dofprov->dofpv_nameattr); 6322 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 6323 dofprov->dofpv_argsattr); 6324 } 6325 6326 static void 6327 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6328 { 6329 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6330 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6331 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 6332 dof_provider_t *provider; 6333 dof_probe_t *probe; 6334 uint32_t *off; 6335 uint8_t *arg; 6336 char *strtab; 6337 uint_t i, nprobes; 6338 dtrace_helper_provdesc_t dhpv; 6339 dtrace_helper_probedesc_t dhpb; 6340 dtrace_meta_t *meta = dtrace_meta_pid; 6341 dtrace_mops_t *mops = &meta->dtm_mops; 6342 void *parg; 6343 6344 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6345 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6346 provider->dofpv_strtab * dof->dofh_secsize); 6347 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6348 provider->dofpv_probes * dof->dofh_secsize); 6349 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6350 provider->dofpv_prargs * dof->dofh_secsize); 6351 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6352 provider->dofpv_proffs * dof->dofh_secsize); 6353 6354 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6355 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 6356 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 6357 6358 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 6359 6360 /* 6361 * Create the provider. 6362 */ 6363 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6364 6365 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 6366 return; 6367 6368 meta->dtm_count++; 6369 6370 /* 6371 * Create the probes. 
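 * Each dof_probe_t below is translated into a
 * dtrace_helper_probedesc_t -- its function, name, offsets, argument
 * mappings and native/translated type strings resolved against the
 * DOF string table -- and is then handed to the meta-provider's
 * dtms_create_probe() entry point.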
6372 */ 6373 for (i = 0; i < nprobes; i++) { 6374 probe = (dof_probe_t *)(uintptr_t)(daddr + 6375 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 6376 6377 dhpb.dthpb_mod = dhp->dofhp_mod; 6378 dhpb.dthpb_func = strtab + probe->dofpr_func; 6379 dhpb.dthpb_name = strtab + probe->dofpr_name; 6380 dhpb.dthpb_base = probe->dofpr_addr; 6381 dhpb.dthpb_offs = off + probe->dofpr_offidx; 6382 dhpb.dthpb_noffs = probe->dofpr_noffs; 6383 dhpb.dthpb_args = arg + probe->dofpr_argidx; 6384 dhpb.dthpb_nargc = probe->dofpr_nargc; 6385 dhpb.dthpb_xargc = probe->dofpr_xargc; 6386 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 6387 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 6388 6389 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 6390 } 6391 } 6392 6393 static void 6394 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 6395 { 6396 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6397 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6398 int i; 6399 6400 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6401 6402 for (i = 0; i < dof->dofh_secnum; i++) { 6403 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6404 dof->dofh_secoff + i * dof->dofh_secsize); 6405 6406 if (sec->dofs_type != DOF_SECT_PROVIDER) 6407 continue; 6408 6409 dtrace_helper_provide_one(dhp, sec, pid); 6410 } 6411 } 6412 6413 static void 6414 dtrace_helper_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6415 { 6416 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6417 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6418 dof_sec_t *str_sec; 6419 dof_provider_t *provider; 6420 char *strtab; 6421 dtrace_helper_provdesc_t dhpv; 6422 dtrace_meta_t *meta = dtrace_meta_pid; 6423 dtrace_mops_t *mops = &meta->dtm_mops; 6424 6425 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6426 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6427 provider->dofpv_strtab * dof->dofh_secsize); 6428 6429 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6430 6431 /* 6432 * Regenerate the provider description for the removal entry point. 6433 */ 6434 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6435 6436 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 6437 6438 meta->dtm_count--; 6439 } 6440 6441 static void 6442 dtrace_helper_remove(dof_helper_t *dhp, pid_t pid) 6443 { 6444 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6445 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6446 int i; 6447 6448 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6449 6450 for (i = 0; i < dof->dofh_secnum; i++) { 6451 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6452 dof->dofh_secoff + i * dof->dofh_secsize); 6453 6454 if (sec->dofs_type != DOF_SECT_PROVIDER) 6455 continue; 6456 6457 dtrace_helper_remove_one(dhp, sec, pid); 6458 } 6459 } 6460 6461 /* 6462 * DTrace Meta Provider-to-Framework API Functions 6463 * 6464 * These functions implement the Meta Provider-to-Framework API, as described 6465 * in <sys/dtrace.h>. 6466 */ 6467 int 6468 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 6469 dtrace_meta_provider_id_t *idp) 6470 { 6471 dtrace_meta_t *meta; 6472 dtrace_helpers_t *help, *next; 6473 int i; 6474 6475 *idp = DTRACE_METAPROVNONE; 6476 6477 /* 6478 * We strictly don't need the name, but we hold onto it for 6479 * debuggability. All hail error queues!
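 * As a purely illustrative sketch (all names here are hypothetical),
 * a user-land meta-provider registers itself along these lines:
 *
 *	static dtrace_mops_t xyz_mops = {
 *		xyz_create_probe,
 *		xyz_provide_pid,
 *		xyz_remove_pid
 *	};
 *	static dtrace_meta_provider_id_t xyz_meta;
 *	...
 *	if (dtrace_meta_register("xyz", &xyz_mops, NULL,
 *	    &xyz_meta) != 0)
 *		return (DDI_FAILURE);
 *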
6480 */ 6481 if (name == NULL) { 6482 cmn_err(CE_WARN, "failed to register meta-provider: " 6483 "invalid name"); 6484 return (EINVAL); 6485 } 6486 6487 if (mops == NULL || 6488 mops->dtms_create_probe == NULL || 6489 mops->dtms_provide_pid == NULL || 6490 mops->dtms_remove_pid == NULL) { 6491 cmn_err(CE_WARN, "failed to register meta-provider %s: " 6492 "invalid ops", name); 6493 return (EINVAL); 6494 } 6495 6496 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 6497 meta->dtm_mops = *mops; 6498 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6499 (void) strcpy(meta->dtm_name, name); 6500 meta->dtm_arg = arg; 6501 6502 mutex_enter(&dtrace_meta_lock); 6503 mutex_enter(&dtrace_lock); 6504 6505 if (dtrace_meta_pid != NULL) { 6506 mutex_exit(&dtrace_lock); 6507 mutex_exit(&dtrace_meta_lock); 6508 cmn_err(CE_WARN, "failed to register meta-provider %s: " 6509 "user-land meta-provider exists", name); 6510 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 6511 kmem_free(meta, sizeof (dtrace_meta_t)); 6512 return (EINVAL); 6513 } 6514 6515 dtrace_meta_pid = meta; 6516 *idp = (dtrace_meta_provider_id_t)meta; 6517 6518 /* 6519 * If there are providers and probes ready to go, pass them 6520 * off to the new meta-provider now. 6521 */ 6522 6523 help = dtrace_deferred_pid; 6524 dtrace_deferred_pid = NULL; 6525 6526 mutex_exit(&dtrace_lock); 6527 6528 while (help != NULL) { 6529 for (i = 0; i < help->dthps_nprovs; i++) { 6530 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 6531 help->dthps_pid); 6532 } 6533 6534 next = help->dthps_next; 6535 help->dthps_next = NULL; 6536 help->dthps_prev = NULL; 6537 help = next; 6538 } 6539 6540 mutex_exit(&dtrace_meta_lock); 6541 6542 return (0); 6543 } 6544 6545 int 6546 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 6547 { 6548 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 6549 6550 mutex_enter(&dtrace_meta_lock); 6551 mutex_enter(&dtrace_lock); 6552 6553 if (old == dtrace_meta_pid) { 6554 pp = &dtrace_meta_pid; 6555 } else { 6556 panic("attempt to unregister non-existent " 6557 "dtrace meta-provider %p\n", (void *)old); 6558 } 6559 6560 if (old->dtm_count != 0) { 6561 mutex_exit(&dtrace_lock); 6562 mutex_exit(&dtrace_meta_lock); 6563 return (EBUSY); 6564 } 6565 6566 *pp = NULL; 6567 6568 mutex_exit(&dtrace_lock); 6569 mutex_exit(&dtrace_meta_lock); 6570 6571 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 6572 kmem_free(old, sizeof (dtrace_meta_t)); 6573 6574 return (0); 6575 } 6576 6577 6578 /* 6579 * DTrace DIF Object Functions 6580 */ 6581 static int 6582 dtrace_difo_err(uint_t pc, const char *format, ...) 6583 { 6584 if (dtrace_err_verbose) { 6585 va_list alist; 6586 6587 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 6588 va_start(alist, format); 6589 (void) vuprintf(format, alist); 6590 va_end(alist); 6591 } 6592 6593 #ifdef DTRACE_ERRDEBUG 6594 dtrace_errdebug(format); 6595 #endif 6596 return (1); 6597 } 6598 6599 /* 6600 * Validate a DTrace DIF object by checking the IR instructions. The following 6601 * rules are currently enforced by dtrace_difo_validate(): 6602 * 6603 * 1. Each instruction must have a valid opcode 6604 * 2. Each register, string, variable, or subroutine reference must be valid 6605 * 3. No instruction can modify register %r0 (must be zero) 6606 * 4. All instruction reserved bits must be set to zero 6607 * 5. The last instruction must be a "ret" instruction 6608 * 6.
All branch targets must reference a valid instruction _after_ the branch 6609 */ 6610 static int 6611 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 6612 cred_t *cr) 6613 { 6614 int err = 0, i; 6615 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 6616 int kcheck; 6617 uint_t pc; 6618 6619 kcheck = cr == NULL || 6620 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0; 6621 6622 dp->dtdo_destructive = 0; 6623 6624 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 6625 dif_instr_t instr = dp->dtdo_buf[pc]; 6626 6627 uint_t r1 = DIF_INSTR_R1(instr); 6628 uint_t r2 = DIF_INSTR_R2(instr); 6629 uint_t rd = DIF_INSTR_RD(instr); 6630 uint_t rs = DIF_INSTR_RS(instr); 6631 uint_t label = DIF_INSTR_LABEL(instr); 6632 uint_t v = DIF_INSTR_VAR(instr); 6633 uint_t subr = DIF_INSTR_SUBR(instr); 6634 uint_t type = DIF_INSTR_TYPE(instr); 6635 uint_t op = DIF_INSTR_OP(instr); 6636 6637 switch (op) { 6638 case DIF_OP_OR: 6639 case DIF_OP_XOR: 6640 case DIF_OP_AND: 6641 case DIF_OP_SLL: 6642 case DIF_OP_SRL: 6643 case DIF_OP_SRA: 6644 case DIF_OP_SUB: 6645 case DIF_OP_ADD: 6646 case DIF_OP_MUL: 6647 case DIF_OP_SDIV: 6648 case DIF_OP_UDIV: 6649 case DIF_OP_SREM: 6650 case DIF_OP_UREM: 6651 case DIF_OP_COPYS: 6652 if (r1 >= nregs) 6653 err += efunc(pc, "invalid register %u\n", r1); 6654 if (r2 >= nregs) 6655 err += efunc(pc, "invalid register %u\n", r2); 6656 if (rd >= nregs) 6657 err += efunc(pc, "invalid register %u\n", rd); 6658 if (rd == 0) 6659 err += efunc(pc, "cannot write to %r0\n"); 6660 break; 6661 case DIF_OP_NOT: 6662 case DIF_OP_MOV: 6663 case DIF_OP_ALLOCS: 6664 if (r1 >= nregs) 6665 err += efunc(pc, "invalid register %u\n", r1); 6666 if (r2 != 0) 6667 err += efunc(pc, "non-zero reserved bits\n"); 6668 if (rd >= nregs) 6669 err += efunc(pc, "invalid register %u\n", rd); 6670 if (rd == 0) 6671 err += efunc(pc, "cannot write to %r0\n"); 6672 break; 6673 case DIF_OP_LDSB: 6674 case DIF_OP_LDSH: 6675 case DIF_OP_LDSW: 6676 case DIF_OP_LDUB: 6677 case DIF_OP_LDUH: 6678 case DIF_OP_LDUW: 6679 case DIF_OP_LDX: 6680 if (r1 >= nregs) 6681 err += efunc(pc, "invalid register %u\n", r1); 6682 if (r2 != 0) 6683 err += efunc(pc, "non-zero reserved bits\n"); 6684 if (rd >= nregs) 6685 err += efunc(pc, "invalid register %u\n", rd); 6686 if (rd == 0) 6687 err += efunc(pc, "cannot write to %r0\n"); 6688 if (kcheck) 6689 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 6690 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 6691 break; 6692 case DIF_OP_RLDSB: 6693 case DIF_OP_RLDSH: 6694 case DIF_OP_RLDSW: 6695 case DIF_OP_RLDUB: 6696 case DIF_OP_RLDUH: 6697 case DIF_OP_RLDUW: 6698 case DIF_OP_RLDX: 6699 if (r1 >= nregs) 6700 err += efunc(pc, "invalid register %u\n", r1); 6701 if (r2 != 0) 6702 err += efunc(pc, "non-zero reserved bits\n"); 6703 if (rd >= nregs) 6704 err += efunc(pc, "invalid register %u\n", rd); 6705 if (rd == 0) 6706 err += efunc(pc, "cannot write to %r0\n"); 6707 break; 6708 case DIF_OP_ULDSB: 6709 case DIF_OP_ULDSH: 6710 case DIF_OP_ULDSW: 6711 case DIF_OP_ULDUB: 6712 case DIF_OP_ULDUH: 6713 case DIF_OP_ULDUW: 6714 case DIF_OP_ULDX: 6715 if (r1 >= nregs) 6716 err += efunc(pc, "invalid register %u\n", r1); 6717 if (r2 != 0) 6718 err += efunc(pc, "non-zero reserved bits\n"); 6719 if (rd >= nregs) 6720 err += efunc(pc, "invalid register %u\n", rd); 6721 if (rd == 0) 6722 err += efunc(pc, "cannot write to %r0\n"); 6723 break; 6724 case DIF_OP_STB: 6725 case DIF_OP_STH: 6726 case DIF_OP_STW: 6727 case DIF_OP_STX: 6728 if (r1 >= nregs) 6729 err += efunc(pc, "invalid register 
%u\n", r1); 6730 if (r2 != 0) 6731 err += efunc(pc, "non-zero reserved bits\n"); 6732 if (rd >= nregs) 6733 err += efunc(pc, "invalid register %u\n", rd); 6734 if (rd == 0) 6735 err += efunc(pc, "cannot write to 0 address\n"); 6736 break; 6737 case DIF_OP_CMP: 6738 case DIF_OP_SCMP: 6739 if (r1 >= nregs) 6740 err += efunc(pc, "invalid register %u\n", r1); 6741 if (r2 >= nregs) 6742 err += efunc(pc, "invalid register %u\n", r2); 6743 if (rd != 0) 6744 err += efunc(pc, "non-zero reserved bits\n"); 6745 break; 6746 case DIF_OP_TST: 6747 if (r1 >= nregs) 6748 err += efunc(pc, "invalid register %u\n", r1); 6749 if (r2 != 0 || rd != 0) 6750 err += efunc(pc, "non-zero reserved bits\n"); 6751 break; 6752 case DIF_OP_BA: 6753 case DIF_OP_BE: 6754 case DIF_OP_BNE: 6755 case DIF_OP_BG: 6756 case DIF_OP_BGU: 6757 case DIF_OP_BGE: 6758 case DIF_OP_BGEU: 6759 case DIF_OP_BL: 6760 case DIF_OP_BLU: 6761 case DIF_OP_BLE: 6762 case DIF_OP_BLEU: 6763 if (label >= dp->dtdo_len) { 6764 err += efunc(pc, "invalid branch target %u\n", 6765 label); 6766 } 6767 if (label <= pc) { 6768 err += efunc(pc, "backward branch to %u\n", 6769 label); 6770 } 6771 break; 6772 case DIF_OP_RET: 6773 if (r1 != 0 || r2 != 0) 6774 err += efunc(pc, "non-zero reserved bits\n"); 6775 if (rd >= nregs) 6776 err += efunc(pc, "invalid register %u\n", rd); 6777 break; 6778 case DIF_OP_NOP: 6779 case DIF_OP_POPTS: 6780 case DIF_OP_FLUSHTS: 6781 if (r1 != 0 || r2 != 0 || rd != 0) 6782 err += efunc(pc, "non-zero reserved bits\n"); 6783 break; 6784 case DIF_OP_SETX: 6785 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 6786 err += efunc(pc, "invalid integer ref %u\n", 6787 DIF_INSTR_INTEGER(instr)); 6788 } 6789 if (rd >= nregs) 6790 err += efunc(pc, "invalid register %u\n", rd); 6791 if (rd == 0) 6792 err += efunc(pc, "cannot write to %r0\n"); 6793 break; 6794 case DIF_OP_SETS: 6795 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 6796 err += efunc(pc, "invalid string ref %u\n", 6797 DIF_INSTR_STRING(instr)); 6798 } 6799 if (rd >= nregs) 6800 err += efunc(pc, "invalid register %u\n", rd); 6801 if (rd == 0) 6802 err += efunc(pc, "cannot write to %r0\n"); 6803 break; 6804 case DIF_OP_LDGA: 6805 case DIF_OP_LDTA: 6806 if (r1 > DIF_VAR_ARRAY_MAX) 6807 err += efunc(pc, "invalid array %u\n", r1); 6808 if (r2 >= nregs) 6809 err += efunc(pc, "invalid register %u\n", r2); 6810 if (rd >= nregs) 6811 err += efunc(pc, "invalid register %u\n", rd); 6812 if (rd == 0) 6813 err += efunc(pc, "cannot write to %r0\n"); 6814 break; 6815 case DIF_OP_LDGS: 6816 case DIF_OP_LDTS: 6817 case DIF_OP_LDLS: 6818 case DIF_OP_LDGAA: 6819 case DIF_OP_LDTAA: 6820 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 6821 err += efunc(pc, "invalid variable %u\n", v); 6822 if (rd >= nregs) 6823 err += efunc(pc, "invalid register %u\n", rd); 6824 if (rd == 0) 6825 err += efunc(pc, "cannot write to %r0\n"); 6826 break; 6827 case DIF_OP_STGS: 6828 case DIF_OP_STTS: 6829 case DIF_OP_STLS: 6830 case DIF_OP_STGAA: 6831 case DIF_OP_STTAA: 6832 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 6833 err += efunc(pc, "invalid variable %u\n", v); 6834 if (rs >= nregs) 6835 err += efunc(pc, "invalid register %u\n", rs); 6836 break; 6837 case DIF_OP_CALL: 6838 if (subr > DIF_SUBR_MAX) 6839 err += efunc(pc, "invalid subr %u\n", subr); 6840 if (rd >= nregs) 6841 err += efunc(pc, "invalid register %u\n", rd); 6842 if (rd == 0) 6843 err += efunc(pc, "cannot write to %r0\n"); 6844 6845 if (subr == DIF_SUBR_COPYOUT || 6846 subr == DIF_SUBR_COPYOUTSTR) { 6847 dp->dtdo_destructive = 1; 6848 }
6849 break; 6850 case DIF_OP_PUSHTR: 6851 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 6852 err += efunc(pc, "invalid ref type %u\n", type); 6853 if (r2 >= nregs) 6854 err += efunc(pc, "invalid register %u\n", r2); 6855 if (rs >= nregs) 6856 err += efunc(pc, "invalid register %u\n", rs); 6857 break; 6858 case DIF_OP_PUSHTV: 6859 if (type != DIF_TYPE_CTF) 6860 err += efunc(pc, "invalid val type %u\n", type); 6861 if (r2 >= nregs) 6862 err += efunc(pc, "invalid register %u\n", r2); 6863 if (rs >= nregs) 6864 err += efunc(pc, "invalid register %u\n", rs); 6865 break; 6866 default: 6867 err += efunc(pc, "invalid opcode %u\n", 6868 DIF_INSTR_OP(instr)); 6869 } 6870 } 6871 6872 if (dp->dtdo_len != 0 && 6873 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 6874 err += efunc(dp->dtdo_len - 1, 6875 "expected 'ret' as last DIF instruction\n"); 6876 } 6877 6878 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 6879 /* 6880 * If we're not returning by reference, the size must be either 6881 * 0 or the size of one of the base types. 6882 */ 6883 switch (dp->dtdo_rtype.dtdt_size) { 6884 case 0: 6885 case sizeof (uint8_t): 6886 case sizeof (uint16_t): 6887 case sizeof (uint32_t): 6888 case sizeof (uint64_t): 6889 break; 6890 6891 default: 6892 err += efunc(dp->dtdo_len - 1, "bad return size"); 6893 } 6894 } 6895 6896 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 6897 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 6898 dtrace_diftype_t *vt, *et; 6899 uint_t id, ndx; 6900 6901 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 6902 v->dtdv_scope != DIFV_SCOPE_THREAD && 6903 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 6904 err += efunc(i, "unrecognized variable scope %d\n", 6905 v->dtdv_scope); 6906 break; 6907 } 6908 6909 if (v->dtdv_kind != DIFV_KIND_ARRAY && 6910 v->dtdv_kind != DIFV_KIND_SCALAR) { 6911 err += efunc(i, "unrecognized variable type %d\n", 6912 v->dtdv_kind); 6913 break; 6914 } 6915 6916 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 6917 err += efunc(i, "%d exceeds variable id limit\n", id); 6918 break; 6919 } 6920 6921 if (id < DIF_VAR_OTHER_UBASE) 6922 continue; 6923 6924 /* 6925 * For user-defined variables, we need to check that this 6926 * definition is identical to any previous definition that we 6927 * encountered. 
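 * (Several DIF objects within a single enabling may each carry a
 * definition of the same user-defined variable; were those
 * definitions allowed to disagree in kind, type flags or type size,
 * the variable's storage could be interpreted inconsistently.)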
6928 */ 6929 ndx = id - DIF_VAR_OTHER_UBASE; 6930 6931 switch (v->dtdv_scope) { 6932 case DIFV_SCOPE_GLOBAL: 6933 if (ndx < vstate->dtvs_nglobals) { 6934 dtrace_statvar_t *svar; 6935 6936 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 6937 existing = &svar->dtsv_var; 6938 } 6939 6940 break; 6941 6942 case DIFV_SCOPE_THREAD: 6943 if (ndx < vstate->dtvs_ntlocals) 6944 existing = &vstate->dtvs_tlocals[ndx]; 6945 break; 6946 6947 case DIFV_SCOPE_LOCAL: 6948 if (ndx < vstate->dtvs_nlocals) { 6949 dtrace_statvar_t *svar; 6950 6951 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 6952 existing = &svar->dtsv_var; 6953 } 6954 6955 break; 6956 } 6957 6958 vt = &v->dtdv_type; 6959 6960 if (vt->dtdt_flags & DIF_TF_BYREF) { 6961 if (vt->dtdt_size == 0) { 6962 err += efunc(i, "zero-sized variable\n"); 6963 break; 6964 } 6965 6966 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 6967 vt->dtdt_size > dtrace_global_maxsize) { 6968 err += efunc(i, "oversized by-ref global\n"); 6969 break; 6970 } 6971 } 6972 6973 if (existing == NULL || existing->dtdv_id == 0) 6974 continue; 6975 6976 ASSERT(existing->dtdv_id == v->dtdv_id); 6977 ASSERT(existing->dtdv_scope == v->dtdv_scope); 6978 6979 if (existing->dtdv_kind != v->dtdv_kind) 6980 err += efunc(i, "%d changed variable kind\n", id); 6981 6982 et = &existing->dtdv_type; 6983 6984 if (vt->dtdt_flags != et->dtdt_flags) { 6985 err += efunc(i, "%d changed variable type flags\n", id); 6986 break; 6987 } 6988 6989 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 6990 err += efunc(i, "%d changed variable type size\n", id); 6991 break; 6992 } 6993 } 6994 6995 return (err); 6996 } 6997 6998 /* 6999 * Validate a DTrace DIF object that is to be used as a helper. Helpers 7000 * are much more constrained than normal DIFOs. Specifically, they may 7001 * not: 7002 * 7003 * 1. Make calls to subroutines other than alloca(), bcopy(), copyin(), copyinto() or copyinstr(). 7004 * 2. Access DTrace variables other than the args[] array, and the 7005 * curthread, pid, tid, execname and zonename variables. 7006 * 3. Have thread-local variables. 7007 * 4. Have dynamic variables. 7008 */ 7009 static int 7010 dtrace_difo_validate_helper(dtrace_difo_t *dp) 7011 { 7012 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 7013 int err = 0; 7014 uint_t pc; 7015 7016 for (pc = 0; pc < dp->dtdo_len; pc++) { 7017 dif_instr_t instr = dp->dtdo_buf[pc]; 7018 7019 uint_t v = DIF_INSTR_VAR(instr); 7020 uint_t subr = DIF_INSTR_SUBR(instr); 7021 uint_t op = DIF_INSTR_OP(instr); 7022 7023 switch (op) { 7024 case DIF_OP_OR: 7025 case DIF_OP_XOR: 7026 case DIF_OP_AND: 7027 case DIF_OP_SLL: 7028 case DIF_OP_SRL: 7029 case DIF_OP_SRA: 7030 case DIF_OP_SUB: 7031 case DIF_OP_ADD: 7032 case DIF_OP_MUL: 7033 case DIF_OP_SDIV: 7034 case DIF_OP_UDIV: 7035 case DIF_OP_SREM: 7036 case DIF_OP_UREM: 7037 case DIF_OP_COPYS: 7038 case DIF_OP_NOT: 7039 case DIF_OP_MOV: 7040 case DIF_OP_RLDSB: 7041 case DIF_OP_RLDSH: 7042 case DIF_OP_RLDSW: 7043 case DIF_OP_RLDUB: 7044 case DIF_OP_RLDUH: 7045 case DIF_OP_RLDUW: 7046 case DIF_OP_RLDX: 7047 case DIF_OP_ULDSB: 7048 case DIF_OP_ULDSH: 7049 case DIF_OP_ULDSW: 7050 case DIF_OP_ULDUB: 7051 case DIF_OP_ULDUH: 7052 case DIF_OP_ULDUW: 7053 case DIF_OP_ULDX: 7054 case DIF_OP_STB: 7055 case DIF_OP_STH: 7056 case DIF_OP_STW: 7057 case DIF_OP_STX: 7058 case DIF_OP_ALLOCS: 7059 case DIF_OP_CMP: 7060 case DIF_OP_SCMP: 7061 case DIF_OP_TST: 7062 case DIF_OP_BA: 7063 case DIF_OP_BE: 7064 case DIF_OP_BNE: 7065 case DIF_OP_BG: 7066 case DIF_OP_BGU: 7067 case DIF_OP_BGE: 7068 case DIF_OP_BGEU: 7069 case DIF_OP_BL: 7070 case DIF_OP_BLU: 7071 case DIF_OP_BLE: 7072 case DIF_OP_BLEU: 7073 case DIF_OP_RET: 7074 case DIF_OP_NOP: 7075 case DIF_OP_POPTS: 7076 case DIF_OP_FLUSHTS: 7077 case DIF_OP_SETX: 7078 case DIF_OP_SETS: 7079 case DIF_OP_LDGA: 7080 case DIF_OP_LDLS: 7081 case DIF_OP_STGS: 7082 case DIF_OP_STLS: 7083 case DIF_OP_PUSHTR: 7084 case DIF_OP_PUSHTV: 7085 break; 7086 7087 case DIF_OP_LDGS: 7088 if (v >= DIF_VAR_OTHER_UBASE) 7089 break; 7090 7091 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7092 break; 7093 7094 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7095 v == DIF_VAR_TID || v == DIF_VAR_EXECNAME || 7096 v == DIF_VAR_ZONENAME) 7097 break; 7098 7099 err += efunc(pc, "illegal variable %u\n", v); 7100 break; 7101 7102 case DIF_OP_LDTA: 7103 case DIF_OP_LDTS: 7104 case DIF_OP_LDGAA: 7105 case DIF_OP_LDTAA: 7106 err += efunc(pc, "illegal dynamic variable load\n"); 7107 break; 7108 7109 case DIF_OP_STTS: 7110 case DIF_OP_STGAA: 7111 case DIF_OP_STTAA: 7112 err += efunc(pc, "illegal dynamic variable store\n"); 7113 break; 7114 7115 case DIF_OP_CALL: 7116 if (subr == DIF_SUBR_ALLOCA || 7117 subr == DIF_SUBR_BCOPY || 7118 subr == DIF_SUBR_COPYIN || 7119 subr == DIF_SUBR_COPYINTO || 7120 subr == DIF_SUBR_COPYINSTR) 7121 break; 7122 7123 err += efunc(pc, "invalid subr %u\n", subr); 7124 break; 7125 7126 default: 7127 err += efunc(pc, "invalid opcode %u\n", 7128 DIF_INSTR_OP(instr)); 7129 } 7130 } 7131 7132 return (err); 7133 } 7134 7135 /* 7136 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7137 * basis; 0 if not. 7138 */ 7139 static int 7140 dtrace_difo_cacheable(dtrace_difo_t *dp) 7141 { 7142 int i; 7143 7144 if (dp == NULL) 7145 return (0); 7146 7147 for (i = 0; i < dp->dtdo_varlen; i++) { 7148 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7149 7150 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 7151 continue; 7152 7153 switch (v->dtdv_id) { 7154 case DIF_VAR_CURTHREAD: 7155 case DIF_VAR_PID: 7156 case DIF_VAR_TID: 7157 case DIF_VAR_EXECNAME: 7158 case DIF_VAR_ZONENAME: 7159 break; 7160 7161 default: 7162 return (0); 7163 } 7164 } 7165 7166 /* 7167 * This DIF object may be cacheable. 
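 * (Each of the variables allowed above is a pure function of the
 * current thread, which is what makes caching an evaluation on a
 * per-thread basis sound; an arbitrary load, by contrast, may yield
 * a different value on every evaluation.)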
Now we need to look for any 7168 * load variant instructions, or any stores to thread-local variables. 7169 */ 7170 for (i = 0; i < dp->dtdo_len; i++) { 7171 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 7172 7173 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 7174 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 7175 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 7176 (op == DIF_OP_STTS)) 7177 return (0); 7178 } 7179 7180 return (1); 7181 } 7182 7183 static void 7184 dtrace_difo_hold(dtrace_difo_t *dp) 7185 { 7186 int i; 7187 7188 ASSERT(MUTEX_HELD(&dtrace_lock)); 7189 7190 dp->dtdo_refcnt++; 7191 ASSERT(dp->dtdo_refcnt != 0); 7192 7193 /* 7194 * We need to check this DIF object for references to the variable 7195 * DIF_VAR_VTIMESTAMP. 7196 */ 7197 for (i = 0; i < dp->dtdo_varlen; i++) { 7198 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7199 7200 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7201 continue; 7202 7203 if (dtrace_vtime_references++ == 0) 7204 dtrace_vtime_enable(); 7205 } 7206 } 7207 7208 /* 7209 * This routine calculates the dynamic variable chunksize for a given DIF 7210 * object. The calculation is not fool-proof, and can probably be tricked by 7211 * malicious DIF -- but it works for all compiler-generated DIF. Because this 7212 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 7213 * if a dynamic variable size exceeds the chunksize. 7214 */ 7215 static void 7216 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7217 { 7218 uint64_t sval; 7219 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 7220 const dif_instr_t *text = dp->dtdo_buf; 7221 uint_t pc, srd = 0; 7222 uint_t ttop = 0; 7223 size_t size, ksize; 7224 uint_t id, i; 7225 7226 for (pc = 0; pc < dp->dtdo_len; pc++) { 7227 dif_instr_t instr = text[pc]; 7228 uint_t op = DIF_INSTR_OP(instr); 7229 uint_t rd = DIF_INSTR_RD(instr); 7230 uint_t r1 = DIF_INSTR_R1(instr); 7231 uint_t nkeys = 0; 7232 uchar_t scope; 7233 7234 dtrace_key_t *key = tupregs; 7235 7236 switch (op) { 7237 case DIF_OP_SETX: 7238 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 7239 srd = rd; 7240 continue; 7241 7242 case DIF_OP_STTS: 7243 key = &tupregs[DIF_DTR_NREGS]; 7244 key[0].dttk_size = 0; 7245 key[1].dttk_size = 0; 7246 nkeys = 2; 7247 scope = DIFV_SCOPE_THREAD; 7248 break; 7249 7250 case DIF_OP_STGAA: 7251 case DIF_OP_STTAA: 7252 nkeys = ttop; 7253 7254 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 7255 key[nkeys++].dttk_size = 0; 7256 7257 key[nkeys++].dttk_size = 0; 7258 7259 if (op == DIF_OP_STTAA) { 7260 scope = DIFV_SCOPE_THREAD; 7261 } else { 7262 scope = DIFV_SCOPE_GLOBAL; 7263 } 7264 7265 break; 7266 7267 case DIF_OP_PUSHTR: 7268 if (ttop == DIF_DTR_NREGS) 7269 return; 7270 7271 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 7272 /* 7273 * If the register for the size of the "pushtr" 7274 * is %r0 (or the value is 0) and the type is 7275 * a string, we'll use the system-wide default 7276 * string size. 
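 * (dtrace_strsize_default is the same tunable that supplies the
 * default value of the "strsize" option.)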
7277 */ 7278 tupregs[ttop++].dttk_size = 7279 dtrace_strsize_default; 7280 } else { 7281 if (srd == 0) 7282 return; 7283 7284 tupregs[ttop++].dttk_size = sval; 7285 } 7286 7287 break; 7288 7289 case DIF_OP_PUSHTV: 7290 if (ttop == DIF_DTR_NREGS) 7291 return; 7292 7293 tupregs[ttop++].dttk_size = 0; 7294 break; 7295 7296 case DIF_OP_FLUSHTS: 7297 ttop = 0; 7298 break; 7299 7300 case DIF_OP_POPTS: 7301 if (ttop != 0) 7302 ttop--; 7303 break; 7304 } 7305 7306 sval = 0; 7307 srd = 0; 7308 7309 if (nkeys == 0) 7310 continue; 7311 7312 /* 7313 * We have a dynamic variable allocation; calculate its size. 7314 */ 7315 for (ksize = 0, i = 0; i < nkeys; i++) 7316 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 7317 7318 size = sizeof (dtrace_dynvar_t); 7319 size += sizeof (dtrace_key_t) * (nkeys - 1); 7320 size += ksize; 7321 7322 /* 7323 * Now we need to determine the size of the stored data. 7324 */ 7325 id = DIF_INSTR_VAR(instr); 7326 7327 for (i = 0; i < dp->dtdo_varlen; i++) { 7328 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7329 7330 if (v->dtdv_id == id && v->dtdv_scope == scope) { 7331 size += v->dtdv_type.dtdt_size; 7332 break; 7333 } 7334 } 7335 7336 if (i == dp->dtdo_varlen) 7337 return; 7338 7339 /* 7340 * We have the size. If this is larger than the chunk size 7341 * for our dynamic variable state, reset the chunk size. 7342 */ 7343 size = P2ROUNDUP(size, sizeof (uint64_t)); 7344 7345 if (size > vstate->dtvs_dynvars.dtds_chunksize) 7346 vstate->dtvs_dynvars.dtds_chunksize = size; 7347 } 7348 } 7349 7350 static void 7351 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7352 { 7353 int i, oldsvars, osz, nsz, otlocals, ntlocals; 7354 uint_t id; 7355 7356 ASSERT(MUTEX_HELD(&dtrace_lock)); 7357 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 7358 7359 for (i = 0; i < dp->dtdo_varlen; i++) { 7360 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7361 dtrace_statvar_t *svar, ***svarp; 7362 size_t dsize = 0; 7363 uint8_t scope = v->dtdv_scope; 7364 int *np; 7365 7366 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7367 continue; 7368 7369 id -= DIF_VAR_OTHER_UBASE; 7370 7371 switch (scope) { 7372 case DIFV_SCOPE_THREAD: 7373 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 7374 dtrace_difv_t *tlocals; 7375 7376 if ((ntlocals = (otlocals << 1)) == 0) 7377 ntlocals = 1; 7378 7379 osz = otlocals * sizeof (dtrace_difv_t); 7380 nsz = ntlocals * sizeof (dtrace_difv_t); 7381 7382 tlocals = kmem_zalloc(nsz, KM_SLEEP); 7383 7384 if (osz != 0) { 7385 bcopy(vstate->dtvs_tlocals, 7386 tlocals, osz); 7387 kmem_free(vstate->dtvs_tlocals, osz); 7388 } 7389 7390 vstate->dtvs_tlocals = tlocals; 7391 vstate->dtvs_ntlocals = ntlocals; 7392 } 7393 7394 vstate->dtvs_tlocals[id] = *v; 7395 continue; 7396 7397 case DIFV_SCOPE_LOCAL: 7398 np = &vstate->dtvs_nlocals; 7399 svarp = &vstate->dtvs_locals; 7400 7401 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7402 dsize = NCPU * (v->dtdv_type.dtdt_size + 7403 sizeof (uint64_t)); 7404 else 7405 dsize = NCPU * sizeof (uint64_t); 7406 7407 break; 7408 7409 case DIFV_SCOPE_GLOBAL: 7410 np = &vstate->dtvs_nglobals; 7411 svarp = &vstate->dtvs_globals; 7412 7413 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7414 dsize = v->dtdv_type.dtdt_size + 7415 sizeof (uint64_t); 7416 7417 break; 7418 7419 default: 7420 ASSERT(0); 7421 } 7422 7423 while (id >= (oldsvars = *np)) { 7424 dtrace_statvar_t **statics; 7425 int newsvars, oldsize, newsize; 7426 7427 if ((newsvars = (oldsvars << 1)) == 0) 7428 newsvars = 1; 7429 7430 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 7431 
newsize = newsvars * sizeof (dtrace_statvar_t *); 7432 7433 statics = kmem_zalloc(newsize, KM_SLEEP); 7434 7435 if (oldsize != 0) { 7436 bcopy(*svarp, statics, oldsize); 7437 kmem_free(*svarp, oldsize); 7438 } 7439 7440 *svarp = statics; 7441 *np = newsvars; 7442 } 7443 7444 if ((svar = (*svarp)[id]) == NULL) { 7445 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 7446 svar->dtsv_var = *v; 7447 7448 if ((svar->dtsv_size = dsize) != 0) { 7449 svar->dtsv_data = (uint64_t)(uintptr_t) 7450 kmem_zalloc(dsize, KM_SLEEP); 7451 } 7452 7453 (*svarp)[id] = svar; 7454 } 7455 7456 svar->dtsv_refcnt++; 7457 } 7458 7459 dtrace_difo_chunksize(dp, vstate); 7460 dtrace_difo_hold(dp); 7461 } 7462 7463 static dtrace_difo_t * 7464 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7465 { 7466 dtrace_difo_t *new; 7467 size_t sz; 7468 7469 ASSERT(dp->dtdo_buf != NULL); 7470 ASSERT(dp->dtdo_refcnt != 0); 7471 7472 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 7473 7474 ASSERT(dp->dtdo_buf != NULL); 7475 sz = dp->dtdo_len * sizeof (dif_instr_t); 7476 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 7477 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 7478 new->dtdo_len = dp->dtdo_len; 7479 7480 if (dp->dtdo_strtab != NULL) { 7481 ASSERT(dp->dtdo_strlen != 0); 7482 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 7483 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 7484 new->dtdo_strlen = dp->dtdo_strlen; 7485 } 7486 7487 if (dp->dtdo_inttab != NULL) { 7488 ASSERT(dp->dtdo_intlen != 0); 7489 sz = dp->dtdo_intlen * sizeof (uint64_t); 7490 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 7491 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 7492 new->dtdo_intlen = dp->dtdo_intlen; 7493 } 7494 7495 if (dp->dtdo_vartab != NULL) { 7496 ASSERT(dp->dtdo_varlen != 0); 7497 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 7498 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 7499 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 7500 new->dtdo_varlen = dp->dtdo_varlen; 7501 } 7502 7503 dtrace_difo_init(new, vstate); 7504 return (new); 7505 } 7506 7507 static void 7508 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7509 { 7510 int i; 7511 7512 ASSERT(dp->dtdo_refcnt == 0); 7513 7514 for (i = 0; i < dp->dtdo_varlen; i++) { 7515 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7516 dtrace_statvar_t *svar, **svarp; 7517 uint_t id; 7518 uint8_t scope = v->dtdv_scope; 7519 int *np; 7520 7521 switch (scope) { 7522 case DIFV_SCOPE_THREAD: 7523 continue; 7524 7525 case DIFV_SCOPE_LOCAL: 7526 np = &vstate->dtvs_nlocals; 7527 svarp = vstate->dtvs_locals; 7528 break; 7529 7530 case DIFV_SCOPE_GLOBAL: 7531 np = &vstate->dtvs_nglobals; 7532 svarp = vstate->dtvs_globals; 7533 break; 7534 7535 default: 7536 ASSERT(0); 7537 } 7538 7539 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7540 continue; 7541 7542 id -= DIF_VAR_OTHER_UBASE; 7543 ASSERT(id < *np); 7544 7545 svar = svarp[id]; 7546 ASSERT(svar != NULL); 7547 ASSERT(svar->dtsv_refcnt > 0); 7548 7549 if (--svar->dtsv_refcnt > 0) 7550 continue; 7551 7552 if (svar->dtsv_size != 0) { 7553 ASSERT(svar->dtsv_data != NULL); 7554 kmem_free((void *)(uintptr_t)svar->dtsv_data, 7555 svar->dtsv_size); 7556 } 7557 7558 kmem_free(svar, sizeof (dtrace_statvar_t)); 7559 svarp[id] = NULL; 7560 } 7561 7562 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 7563 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 7564 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 7565 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 7566 7567 
kmem_free(dp, sizeof (dtrace_difo_t)); 7568 } 7569 7570 static void 7571 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7572 { 7573 int i; 7574 7575 ASSERT(MUTEX_HELD(&dtrace_lock)); 7576 ASSERT(dp->dtdo_refcnt != 0); 7577 7578 for (i = 0; i < dp->dtdo_varlen; i++) { 7579 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7580 7581 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7582 continue; 7583 7584 ASSERT(dtrace_vtime_references > 0); 7585 if (--dtrace_vtime_references == 0) 7586 dtrace_vtime_disable(); 7587 } 7588 7589 if (--dp->dtdo_refcnt == 0) 7590 dtrace_difo_destroy(dp, vstate); 7591 } 7592 7593 /* 7594 * DTrace Format Functions 7595 */ 7596 static uint16_t 7597 dtrace_format_add(dtrace_state_t *state, char *str) 7598 { 7599 char *fmt, **new; 7600 uint16_t ndx, len = strlen(str) + 1; 7601 7602 fmt = kmem_zalloc(len, KM_SLEEP); 7603 bcopy(str, fmt, len); 7604 7605 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 7606 if (state->dts_formats[ndx] == NULL) { 7607 state->dts_formats[ndx] = fmt; 7608 return (ndx + 1); 7609 } 7610 } 7611 7612 if (state->dts_nformats == USHRT_MAX) { 7613 /* 7614 * This is only likely if a denial-of-service attack is being 7615 * attempted. As such, it's okay to fail silently here. 7616 */ 7617 kmem_free(fmt, len); 7618 return (0); 7619 } 7620 7621 /* 7622 * For simplicity, we always resize the formats array to be exactly the 7623 * number of formats. 7624 */ 7625 ndx = state->dts_nformats++; 7626 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 7627 7628 if (state->dts_formats != NULL) { 7629 ASSERT(ndx != 0); 7630 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 7631 kmem_free(state->dts_formats, ndx * sizeof (char *)); 7632 } 7633 7634 state->dts_formats = new; 7635 state->dts_formats[ndx] = fmt; 7636 7637 return (ndx + 1); 7638 } 7639 7640 static void 7641 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 7642 { 7643 char *fmt; 7644 7645 ASSERT(state->dts_formats != NULL); 7646 ASSERT(format <= state->dts_nformats); 7647 ASSERT(state->dts_formats[format - 1] != NULL); 7648 7649 fmt = state->dts_formats[format - 1]; 7650 kmem_free(fmt, strlen(fmt) + 1); 7651 state->dts_formats[format - 1] = NULL; 7652 } 7653 7654 static void 7655 dtrace_format_destroy(dtrace_state_t *state) 7656 { 7657 int i; 7658 7659 if (state->dts_nformats == 0) { 7660 ASSERT(state->dts_formats == NULL); 7661 return; 7662 } 7663 7664 ASSERT(state->dts_formats != NULL); 7665 7666 for (i = 0; i < state->dts_nformats; i++) { 7667 char *fmt = state->dts_formats[i]; 7668 7669 if (fmt == NULL) 7670 continue; 7671 7672 kmem_free(fmt, strlen(fmt) + 1); 7673 } 7674 7675 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 7676 state->dts_nformats = 0; 7677 state->dts_formats = NULL; 7678 } 7679 7680 /* 7681 * DTrace Predicate Functions 7682 */ 7683 static dtrace_predicate_t * 7684 dtrace_predicate_create(dtrace_difo_t *dp) 7685 { 7686 dtrace_predicate_t *pred; 7687 7688 ASSERT(MUTEX_HELD(&dtrace_lock)); 7689 ASSERT(dp->dtdo_refcnt != 0); 7690 7691 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 7692 pred->dtp_difo = dp; 7693 pred->dtp_refcnt = 1; 7694 7695 if (!dtrace_difo_cacheable(dp)) 7696 return (pred); 7697 7698 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 7699 /* 7700 * This is only theoretically possible -- we have had 2^32 7701 * cacheable predicates on this machine. We cannot allow any 7702 * more predicates to become cacheable: as unlikely as it is, 7703 * there may be a thread caching a (now stale) predicate cache 7704 * ID. 
(N.B.: the temptation is being successfully resisted to 7705 * have this cmn_err() "Holy shit -- we executed this code!") 7706 */ 7707 return (pred); 7708 } 7709 7710 pred->dtp_cacheid = dtrace_predcache_id++; 7711 7712 return (pred); 7713 } 7714 7715 static void 7716 dtrace_predicate_hold(dtrace_predicate_t *pred) 7717 { 7718 ASSERT(MUTEX_HELD(&dtrace_lock)); 7719 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 7720 ASSERT(pred->dtp_refcnt > 0); 7721 7722 pred->dtp_refcnt++; 7723 } 7724 7725 static void 7726 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 7727 { 7728 dtrace_difo_t *dp = pred->dtp_difo; 7729 7730 ASSERT(MUTEX_HELD(&dtrace_lock)); 7731 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 7732 ASSERT(pred->dtp_refcnt > 0); 7733 7734 if (--pred->dtp_refcnt == 0) { 7735 dtrace_difo_release(pred->dtp_difo, vstate); 7736 kmem_free(pred, sizeof (dtrace_predicate_t)); 7737 } 7738 } 7739 7740 /* 7741 * DTrace Action Description Functions 7742 */ 7743 static dtrace_actdesc_t * 7744 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 7745 uint64_t uarg, uint64_t arg) 7746 { 7747 dtrace_actdesc_t *act; 7748 7749 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 7750 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 7751 7752 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 7753 act->dtad_kind = kind; 7754 act->dtad_ntuple = ntuple; 7755 act->dtad_uarg = uarg; 7756 act->dtad_arg = arg; 7757 act->dtad_refcnt = 1; 7758 7759 return (act); 7760 } 7761 7762 static void 7763 dtrace_actdesc_hold(dtrace_actdesc_t *act) 7764 { 7765 ASSERT(act->dtad_refcnt >= 1); 7766 act->dtad_refcnt++; 7767 } 7768 7769 static void 7770 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 7771 { 7772 dtrace_actkind_t kind = act->dtad_kind; 7773 dtrace_difo_t *dp; 7774 7775 ASSERT(act->dtad_refcnt >= 1); 7776 7777 if (--act->dtad_refcnt != 0) 7778 return; 7779 7780 if ((dp = act->dtad_difo) != NULL) 7781 dtrace_difo_release(dp, vstate); 7782 7783 if (DTRACEACT_ISPRINTFLIKE(kind)) { 7784 char *str = (char *)(uintptr_t)act->dtad_arg; 7785 7786 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 7787 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 7788 7789 if (str != NULL) 7790 kmem_free(str, strlen(str) + 1); 7791 } 7792 7793 kmem_free(act, sizeof (dtrace_actdesc_t)); 7794 } 7795 7796 /* 7797 * DTrace ECB Functions 7798 */ 7799 static dtrace_ecb_t * 7800 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 7801 { 7802 dtrace_ecb_t *ecb; 7803 dtrace_epid_t epid; 7804 7805 ASSERT(MUTEX_HELD(&dtrace_lock)); 7806 7807 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 7808 ecb->dte_predicate = NULL; 7809 ecb->dte_probe = probe; 7810 7811 /* 7812 * The default size is the size of the default action: recording 7813 * the epid. 
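 * (The EPID -- the enabled probe ID -- is the first datum in every
 * record, and is what allows a consumer to map raw buffer contents
 * back to the enabling that generated them.)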
7814 */ 7815 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 7816 ecb->dte_alignment = sizeof (dtrace_epid_t); 7817 7818 epid = state->dts_epid++; 7819 7820 if (epid - 1 >= state->dts_necbs) { 7821 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 7822 int necbs = state->dts_necbs << 1; 7823 7824 ASSERT(epid == state->dts_necbs + 1); 7825 7826 if (necbs == 0) { 7827 ASSERT(oecbs == NULL); 7828 necbs = 1; 7829 } 7830 7831 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 7832 7833 if (oecbs != NULL) 7834 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 7835 7836 dtrace_membar_producer(); 7837 state->dts_ecbs = ecbs; 7838 7839 if (oecbs != NULL) { 7840 /* 7841 * If this state is active, we must dtrace_sync() 7842 * before we can free the old dts_ecbs array: we're 7843 * coming in hot, and there may be active ring 7844 * buffer processing (which indexes into the dts_ecbs 7845 * array) on another CPU. 7846 */ 7847 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 7848 dtrace_sync(); 7849 7850 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 7851 } 7852 7853 dtrace_membar_producer(); 7854 state->dts_necbs = necbs; 7855 } 7856 7857 ecb->dte_state = state; 7858 7859 ASSERT(state->dts_ecbs[epid - 1] == NULL); 7860 dtrace_membar_producer(); 7861 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 7862 7863 return (ecb); 7864 } 7865 7866 static void 7867 dtrace_ecb_enable(dtrace_ecb_t *ecb) 7868 { 7869 dtrace_probe_t *probe = ecb->dte_probe; 7870 7871 ASSERT(MUTEX_HELD(&cpu_lock)); 7872 ASSERT(MUTEX_HELD(&dtrace_lock)); 7873 ASSERT(ecb->dte_next == NULL); 7874 7875 if (probe == NULL) { 7876 /* 7877 * This is the NULL probe -- there's nothing to do. 7878 */ 7879 return; 7880 } 7881 7882 if (probe->dtpr_ecb == NULL) { 7883 dtrace_provider_t *prov = probe->dtpr_provider; 7884 7885 /* 7886 * We're the first ECB on this probe. 7887 */ 7888 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 7889 7890 if (ecb->dte_predicate != NULL) 7891 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 7892 7893 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 7894 probe->dtpr_id, probe->dtpr_arg); 7895 } else { 7896 /* 7897 * This probe is already active. Swing the last pointer to 7898 * point to the new ECB, and issue a dtrace_sync() to assure 7899 * that all CPUs have seen the change. 7900 */ 7901 ASSERT(probe->dtpr_ecb_last != NULL); 7902 probe->dtpr_ecb_last->dte_next = ecb; 7903 probe->dtpr_ecb_last = ecb; 7904 probe->dtpr_predcache = 0; 7905 7906 dtrace_sync(); 7907 } 7908 } 7909 7910 static void 7911 dtrace_ecb_resize(dtrace_ecb_t *ecb) 7912 { 7913 uint32_t maxalign = sizeof (dtrace_epid_t); 7914 uint32_t align = sizeof (uint8_t), offs, diff; 7915 dtrace_action_t *act; 7916 int wastuple = 0; 7917 uint32_t aggbase = UINT32_MAX; 7918 dtrace_state_t *state = ecb->dte_state; 7919 7920 /* 7921 * If we record anything, we always record the epid. (And we always 7922 * record it first.) 7923 */ 7924 offs = sizeof (dtrace_epid_t); 7925 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 7926 7927 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 7928 dtrace_recdesc_t *rec = &act->dta_rec; 7929 7930 if ((align = rec->dtrd_alignment) > maxalign) 7931 maxalign = align; 7932 7933 if (!wastuple && act->dta_intuple) { 7934 /* 7935 * This is the first record in a tuple. Align the 7936 * offset to be at offset 4 in an 8-byte aligned 7937 * block. 
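 * That is, offs is advanced so that a dtrace_aggid_t can sit
 * immediately before the tuple data with the pair as a whole
 * beginning on an 8-byte boundary; the aggbase computation below
 * depends on exactly this placement.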
7938 */ 7939 diff = offs + sizeof (dtrace_aggid_t); 7940 7941 if (diff = (diff & (sizeof (uint64_t) - 1))) 7942 offs += sizeof (uint64_t) - diff; 7943 7944 aggbase = offs - sizeof (dtrace_aggid_t); 7945 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 7946 } 7947 7948 /*LINTED*/ 7949 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 7950 /* 7951 * The current offset is not properly aligned; align it. 7952 */ 7953 offs += align - diff; 7954 } 7955 7956 rec->dtrd_offset = offs; 7957 7958 if (offs + rec->dtrd_size > ecb->dte_needed) { 7959 ecb->dte_needed = offs + rec->dtrd_size; 7960 7961 if (ecb->dte_needed > state->dts_needed) 7962 state->dts_needed = ecb->dte_needed; 7963 } 7964 7965 if (DTRACEACT_ISAGG(act->dta_kind)) { 7966 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 7967 dtrace_action_t *first = agg->dtag_first, *prev; 7968 7969 ASSERT(rec->dtrd_size != 0 && first != NULL); 7970 ASSERT(wastuple); 7971 ASSERT(aggbase != UINT32_MAX); 7972 7973 agg->dtag_base = aggbase; 7974 7975 while ((prev = first->dta_prev) != NULL && 7976 DTRACEACT_ISAGG(prev->dta_kind)) { 7977 agg = (dtrace_aggregation_t *)prev; 7978 first = agg->dtag_first; 7979 } 7980 7981 if (prev != NULL) { 7982 offs = prev->dta_rec.dtrd_offset + 7983 prev->dta_rec.dtrd_size; 7984 } else { 7985 offs = sizeof (dtrace_epid_t); 7986 } 7987 wastuple = 0; 7988 } else { 7989 if (!act->dta_intuple) 7990 ecb->dte_size = offs + rec->dtrd_size; 7991 7992 offs += rec->dtrd_size; 7993 } 7994 7995 wastuple = act->dta_intuple; 7996 } 7997 7998 if ((act = ecb->dte_action) != NULL && 7999 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8000 ecb->dte_size == sizeof (dtrace_epid_t)) { 8001 /* 8002 * If the size is still sizeof (dtrace_epid_t), then all 8003 * actions store no data; set the size to 0. 8004 */ 8005 ecb->dte_alignment = maxalign; 8006 ecb->dte_size = 0; 8007 8008 /* 8009 * If the needed space is still sizeof (dtrace_epid_t), then 8010 * all actions need no additional space; set the needed 8011 * size to 0. 8012 */ 8013 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8014 ecb->dte_needed = 0; 8015 8016 return; 8017 } 8018 8019 /* 8020 * Set our alignment, and make sure that the dte_size and dte_needed 8021 * are aligned to the size of an EPID. 
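 * The rounding below is the usual power-of-two identity,
 * (x + (a - 1)) & ~(a - 1): with a 4-byte dtrace_epid_t, a dte_size
 * of 13, for example, rounds up to 16.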
8022 */ 8023 ecb->dte_alignment = maxalign; 8024 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8025 ~(sizeof (dtrace_epid_t) - 1); 8026 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8027 ~(sizeof (dtrace_epid_t) - 1); 8028 ASSERT(ecb->dte_size <= ecb->dte_needed); 8029 } 8030 8031 static dtrace_action_t * 8032 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8033 { 8034 dtrace_aggregation_t *agg; 8035 size_t size = sizeof (uint64_t); 8036 int ntuple = desc->dtad_ntuple; 8037 dtrace_action_t *act; 8038 dtrace_recdesc_t *frec; 8039 dtrace_aggid_t aggid; 8040 dtrace_state_t *state = ecb->dte_state; 8041 8042 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8043 agg->dtag_ecb = ecb; 8044 8045 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8046 8047 switch (desc->dtad_kind) { 8048 case DTRACEAGG_MIN: 8049 agg->dtag_initial = UINT64_MAX; 8050 agg->dtag_aggregate = dtrace_aggregate_min; 8051 break; 8052 8053 case DTRACEAGG_MAX: 8054 agg->dtag_aggregate = dtrace_aggregate_max; 8055 break; 8056 8057 case DTRACEAGG_COUNT: 8058 agg->dtag_aggregate = dtrace_aggregate_count; 8059 break; 8060 8061 case DTRACEAGG_QUANTIZE: 8062 agg->dtag_aggregate = dtrace_aggregate_quantize; 8063 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8064 sizeof (uint64_t); 8065 break; 8066 8067 case DTRACEAGG_LQUANTIZE: { 8068 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8069 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8070 8071 agg->dtag_initial = desc->dtad_arg; 8072 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8073 8074 if (step == 0 || levels == 0) 8075 goto err; 8076 8077 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8078 break; 8079 } 8080 8081 case DTRACEAGG_AVG: 8082 agg->dtag_aggregate = dtrace_aggregate_avg; 8083 size = sizeof (uint64_t) * 2; 8084 break; 8085 8086 case DTRACEAGG_SUM: 8087 agg->dtag_aggregate = dtrace_aggregate_sum; 8088 break; 8089 8090 default: 8091 goto err; 8092 } 8093 8094 agg->dtag_action.dta_rec.dtrd_size = size; 8095 8096 if (ntuple == 0) 8097 goto err; 8098 8099 /* 8100 * We must make sure that we have enough actions for the n-tuple. 8101 */ 8102 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8103 if (DTRACEACT_ISAGG(act->dta_kind)) 8104 break; 8105 8106 if (--ntuple == 0) { 8107 /* 8108 * This is the action with which our n-tuple begins. 8109 */ 8110 agg->dtag_first = act; 8111 goto success; 8112 } 8113 } 8114 8115 /* 8116 * This n-tuple is short by ntuple elements. Return failure. 8117 */ 8118 ASSERT(ntuple != 0); 8119 err: 8120 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8121 return (NULL); 8122 8123 success: 8124 /* 8125 * We need to allocate an id for this aggregation. 
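 * (The dts_aggid_arena is a vmem arena used purely as an ID
 * allocator: each aggregation ID is a length-1 allocation from a
 * numeric namespace -- the same trick that dtrace_arena plays for
 * probe IDs.)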
8126 */ 8127 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8128 VM_BESTFIT | VM_SLEEP); 8129 8130 if (aggid - 1 >= state->dts_naggregations) { 8131 dtrace_aggregation_t **oaggs = state->dts_aggregations; 8132 dtrace_aggregation_t **aggs; 8133 int naggs = state->dts_naggregations << 1; 8134 int onaggs = state->dts_naggregations; 8135 8136 ASSERT(aggid == state->dts_naggregations + 1); 8137 8138 if (naggs == 0) { 8139 ASSERT(oaggs == NULL); 8140 naggs = 1; 8141 } 8142 8143 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 8144 8145 if (oaggs != NULL) { 8146 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 8147 kmem_free(oaggs, onaggs * sizeof (*aggs)); 8148 } 8149 8150 state->dts_aggregations = aggs; 8151 state->dts_naggregations = naggs; 8152 } 8153 8154 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 8155 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 8156 8157 frec = &agg->dtag_first->dta_rec; 8158 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 8159 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 8160 8161 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 8162 ASSERT(!act->dta_intuple); 8163 act->dta_intuple = 1; 8164 } 8165 8166 return (&agg->dtag_action); 8167 } 8168 8169 static void 8170 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 8171 { 8172 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8173 dtrace_state_t *state = ecb->dte_state; 8174 dtrace_aggid_t aggid = agg->dtag_id; 8175 8176 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 8177 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 8178 8179 ASSERT(state->dts_aggregations[aggid - 1] == agg); 8180 state->dts_aggregations[aggid - 1] = NULL; 8181 8182 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8183 } 8184 8185 static int 8186 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8187 { 8188 dtrace_action_t *action, *last; 8189 dtrace_difo_t *dp = desc->dtad_difo; 8190 uint32_t size = 0, align = sizeof (uint8_t), mask; 8191 uint16_t format = 0; 8192 dtrace_recdesc_t *rec; 8193 dtrace_state_t *state = ecb->dte_state; 8194 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 8195 uint64_t arg = desc->dtad_arg; 8196 8197 ASSERT(MUTEX_HELD(&dtrace_lock)); 8198 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 8199 8200 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 8201 /* 8202 * If this is an aggregating action, there must be neither 8203 * a speculate nor a commit on the action chain. 8204 */ 8205 dtrace_action_t *act; 8206 8207 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8208 if (act->dta_kind == DTRACEACT_COMMIT) 8209 return (EINVAL); 8210 8211 if (act->dta_kind == DTRACEACT_SPECULATE) 8212 return (EINVAL); 8213 } 8214 8215 action = dtrace_ecb_aggregation_create(ecb, desc); 8216 8217 if (action == NULL) 8218 return (EINVAL); 8219 } else { 8220 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 8221 (desc->dtad_kind == DTRACEACT_DIFEXPR && 8222 dp != NULL && dp->dtdo_destructive)) { 8223 state->dts_destructive = 1; 8224 } 8225 8226 switch (desc->dtad_kind) { 8227 case DTRACEACT_PRINTF: 8228 case DTRACEACT_PRINTA: 8229 case DTRACEACT_SYSTEM: 8230 case DTRACEACT_FREOPEN: 8231 /* 8232 * We know that our arg is a string -- turn it into a 8233 * format. 
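 * Format indices are 1-based so that zero can denote the absence of
 * a format; as the code below reflects, printa() is the one
 * printf()-like action that is permitted to lack a format string.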
8234 */ 8235 if (arg == NULL) { 8236 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 8237 format = 0; 8238 } else { 8239 ASSERT(arg != NULL); 8240 ASSERT(arg > KERNELBASE); 8241 format = dtrace_format_add(state, 8242 (char *)(uintptr_t)arg); 8243 } 8244 8245 /*FALLTHROUGH*/ 8246 case DTRACEACT_LIBACT: 8247 case DTRACEACT_DIFEXPR: 8248 if (dp == NULL) 8249 return (EINVAL); 8250 8251 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 8252 break; 8253 8254 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 8255 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8256 return (EINVAL); 8257 8258 size = opt[DTRACEOPT_STRSIZE]; 8259 } 8260 8261 break; 8262 8263 case DTRACEACT_STACK: 8264 if ((nframes = arg) == 0) { 8265 nframes = opt[DTRACEOPT_STACKFRAMES]; 8266 ASSERT(nframes > 0); 8267 arg = nframes; 8268 } 8269 8270 size = nframes * sizeof (pc_t); 8271 break; 8272 8273 case DTRACEACT_JSTACK: 8274 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 8275 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 8276 8277 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 8278 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 8279 8280 arg = DTRACE_USTACK_ARG(nframes, strsize); 8281 8282 /*FALLTHROUGH*/ 8283 case DTRACEACT_USTACK: 8284 if (desc->dtad_kind != DTRACEACT_JSTACK && 8285 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 8286 strsize = DTRACE_USTACK_STRSIZE(arg); 8287 nframes = opt[DTRACEOPT_USTACKFRAMES]; 8288 ASSERT(nframes > 0); 8289 arg = DTRACE_USTACK_ARG(nframes, strsize); 8290 } 8291 8292 /* 8293 * Save a slot for the pid. 8294 */ 8295 size = (nframes + 1) * sizeof (uint64_t); 8296 size += DTRACE_USTACK_STRSIZE(arg); 8297 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 8298 8299 break; 8300 8301 case DTRACEACT_STOP: 8302 case DTRACEACT_BREAKPOINT: 8303 case DTRACEACT_PANIC: 8304 break; 8305 8306 case DTRACEACT_CHILL: 8307 case DTRACEACT_DISCARD: 8308 case DTRACEACT_RAISE: 8309 if (dp == NULL) 8310 return (EINVAL); 8311 break; 8312 8313 case DTRACEACT_EXIT: 8314 if (dp == NULL || 8315 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 8316 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8317 return (EINVAL); 8318 break; 8319 8320 case DTRACEACT_SPECULATE: 8321 8322 if (ecb->dte_action != NULL && ecb->dte_size != 0) 8323 return (EINVAL); 8324 8325 if (dp == NULL) 8326 return (EINVAL); 8327 8328 state->dts_speculates = 1; 8329 break; 8330 8331 case DTRACEACT_COMMIT: { 8332 dtrace_action_t *act = ecb->dte_action; 8333 8334 for (; act != NULL; act = act->dta_next) { 8335 if (act->dta_kind == DTRACEACT_COMMIT) 8336 return (EINVAL); 8337 } 8338 8339 if (dp == NULL) 8340 return (EINVAL); 8341 break; 8342 } 8343 8344 default: 8345 return (EINVAL); 8346 } 8347 8348 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 8349 /* 8350 * If this is a data-storing action or a speculate, 8351 * we must be sure that there isn't a commit on the 8352 * action chain. 
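 *
 * (Once a commit() is on the chain, the speculative buffer has
 * logically been copied out to the principal buffer; letting a
 * record or a speculate() follow it would reorder data around the
 * commit.  For illustration, a hand-built clause of the form
 *
 *	syscall::read:entry { commit(self->spec); trace(timestamp); }
 *
 * is refused below with EINVAL when the trace() action is added.)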
8353 */ 8354 dtrace_action_t *act = ecb->dte_action; 8355 8356 for (; act != NULL; act = act->dta_next) { 8357 if (act->dta_kind == DTRACEACT_COMMIT) 8358 return (EINVAL); 8359 } 8360 } 8361 8362 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 8363 action->dta_rec.dtrd_size = size; 8364 } 8365 8366 action->dta_refcnt = 1; 8367 rec = &action->dta_rec; 8368 size = rec->dtrd_size; 8369 8370 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 8371 if (!(size & mask)) { 8372 align = mask + 1; 8373 break; 8374 } 8375 } 8376 8377 action->dta_kind = desc->dtad_kind; 8378 8379 if ((action->dta_difo = dp) != NULL) 8380 dtrace_difo_hold(dp); 8381 8382 rec->dtrd_action = action->dta_kind; 8383 rec->dtrd_arg = arg; 8384 8385 if (ecb->dte_state == dtrace_anon.dta_state) { 8386 /* 8387 * If this is an anonymous enabling, explicitly clear the uarg. 8388 */ 8389 rec->dtrd_uarg = 0; 8390 } else { 8391 rec->dtrd_uarg = desc->dtad_uarg; 8392 } 8393 8394 rec->dtrd_alignment = (uint16_t)align; 8395 rec->dtrd_format = format; 8396 8397 if ((last = ecb->dte_action_last) != NULL) { 8398 ASSERT(ecb->dte_action != NULL); 8399 action->dta_prev = last; 8400 last->dta_next = action; 8401 } else { 8402 ASSERT(ecb->dte_action == NULL); 8403 ecb->dte_action = action; 8404 } 8405 8406 ecb->dte_action_last = action; 8407 8408 return (0); 8409 } 8410 8411 static void 8412 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 8413 { 8414 dtrace_action_t *act = ecb->dte_action, *next; 8415 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 8416 dtrace_difo_t *dp; 8417 uint16_t format; 8418 8419 if (act != NULL && act->dta_refcnt > 1) { 8420 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 8421 act->dta_refcnt--; 8422 } else { 8423 for (; act != NULL; act = next) { 8424 next = act->dta_next; 8425 ASSERT(next != NULL || act == ecb->dte_action_last); 8426 ASSERT(act->dta_refcnt == 1); 8427 8428 if ((format = act->dta_rec.dtrd_format) != 0) 8429 dtrace_format_remove(ecb->dte_state, format); 8430 8431 if ((dp = act->dta_difo) != NULL) 8432 dtrace_difo_release(dp, vstate); 8433 8434 if (DTRACEACT_ISAGG(act->dta_kind)) { 8435 dtrace_ecb_aggregation_destroy(ecb, act); 8436 } else { 8437 kmem_free(act, sizeof (dtrace_action_t)); 8438 } 8439 } 8440 } 8441 8442 ecb->dte_action = NULL; 8443 ecb->dte_action_last = NULL; 8444 ecb->dte_size = sizeof (dtrace_epid_t); 8445 } 8446 8447 static void 8448 dtrace_ecb_disable(dtrace_ecb_t *ecb) 8449 { 8450 /* 8451 * We disable the ECB by removing it from its probe. 8452 */ 8453 dtrace_ecb_t *pecb, *prev = NULL; 8454 dtrace_probe_t *probe = ecb->dte_probe; 8455 8456 ASSERT(MUTEX_HELD(&dtrace_lock)); 8457 8458 if (probe == NULL) { 8459 /* 8460 * This is the NULL probe; there is nothing to disable. 8461 */ 8462 return; 8463 } 8464 8465 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 8466 if (pecb == ecb) 8467 break; 8468 prev = pecb; 8469 } 8470 8471 ASSERT(pecb != NULL); 8472 8473 if (prev == NULL) { 8474 probe->dtpr_ecb = ecb->dte_next; 8475 } else { 8476 prev->dte_next = ecb->dte_next; 8477 } 8478 8479 if (ecb == probe->dtpr_ecb_last) { 8480 ASSERT(ecb->dte_next == NULL); 8481 probe->dtpr_ecb_last = prev; 8482 } 8483 8484 /* 8485 * The ECB has been disconnected from the probe; now sync to assure 8486 * that all CPUs have seen the change before returning. 
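 *
 * (dtrace_sync() does not return until every CPU has been observed
 * out of probe context; once it returns, no CPU can still be walking
 * the ECB chain from which this ECB was just unlinked.)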
8487 */ 8488 dtrace_sync(); 8489 8490 if (probe->dtpr_ecb == NULL) { 8491 /* 8492 * That was the last ECB on the probe; clear the predicate 8493 * cache ID for the probe, disable it and sync one more time 8494 * to assure that we'll never hit it again. 8495 */ 8496 dtrace_provider_t *prov = probe->dtpr_provider; 8497 8498 ASSERT(ecb->dte_next == NULL); 8499 ASSERT(probe->dtpr_ecb_last == NULL); 8500 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 8501 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 8502 probe->dtpr_id, probe->dtpr_arg); 8503 dtrace_sync(); 8504 } else { 8505 /* 8506 * There is at least one ECB remaining on the probe. If there 8507 * is _exactly_ one, set the probe's predicate cache ID to be 8508 * the predicate cache ID of the remaining ECB. 8509 */ 8510 ASSERT(probe->dtpr_ecb_last != NULL); 8511 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 8512 8513 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 8514 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 8515 8516 ASSERT(probe->dtpr_ecb->dte_next == NULL); 8517 8518 if (p != NULL) 8519 probe->dtpr_predcache = p->dtp_cacheid; 8520 } 8521 8522 ecb->dte_next = NULL; 8523 } 8524 } 8525 8526 static void 8527 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 8528 { 8529 dtrace_state_t *state = ecb->dte_state; 8530 dtrace_vstate_t *vstate = &state->dts_vstate; 8531 dtrace_predicate_t *pred; 8532 dtrace_epid_t epid = ecb->dte_epid; 8533 8534 ASSERT(MUTEX_HELD(&dtrace_lock)); 8535 ASSERT(ecb->dte_next == NULL); 8536 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 8537 8538 if ((pred = ecb->dte_predicate) != NULL) 8539 dtrace_predicate_release(pred, vstate); 8540 8541 dtrace_ecb_action_remove(ecb); 8542 8543 ASSERT(state->dts_ecbs[epid - 1] == ecb); 8544 state->dts_ecbs[epid - 1] = NULL; 8545 8546 kmem_free(ecb, sizeof (dtrace_ecb_t)); 8547 } 8548 8549 static dtrace_ecb_t * 8550 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 8551 dtrace_enabling_t *enab) 8552 { 8553 dtrace_ecb_t *ecb; 8554 dtrace_predicate_t *pred; 8555 dtrace_actdesc_t *act; 8556 dtrace_provider_t *prov; 8557 dtrace_ecbdesc_t *desc = enab->dten_current; 8558 8559 ASSERT(MUTEX_HELD(&dtrace_lock)); 8560 ASSERT(state != NULL); 8561 8562 ecb = dtrace_ecb_add(state, probe); 8563 ecb->dte_uarg = desc->dted_uarg; 8564 8565 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 8566 dtrace_predicate_hold(pred); 8567 ecb->dte_predicate = pred; 8568 } 8569 8570 if (probe != NULL) { 8571 /* 8572 * If the provider shows more leg than the consumer is old 8573 * enough to see, we need to enable the appropriate implicit 8574 * predicate bits to prevent the ecb from activating at 8575 * revealing times. 8576 */ 8577 prov = probe->dtpr_provider; 8578 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 8579 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 8580 ecb->dte_cond |= DTRACE_COND_OWNER; 8581 8582 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 8583 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 8584 ecb->dte_cond |= DTRACE_COND_USERMODE; 8585 } 8586 8587 if (dtrace_ecb_create_cache != NULL) { 8588 /* 8589 * If we have a cached ecb, we'll use its action list instead 8590 * of creating our own (saving both time and space). 
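 *
 * (The cache is simply the most recently created ECB for this
 * enabling; the action list is shared by bumping dta_refcnt below
 * rather than by copying it, which is why dtrace_ecb_action_remove()
 * must tolerate a reference count greater than one.)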
8591 */ 8592 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 8593 dtrace_action_t *act = cached->dte_action; 8594 8595 if (act != NULL) { 8596 ASSERT(act->dta_refcnt > 0); 8597 act->dta_refcnt++; 8598 ecb->dte_action = act; 8599 ecb->dte_action_last = cached->dte_action_last; 8600 ecb->dte_needed = cached->dte_needed; 8601 ecb->dte_size = cached->dte_size; 8602 ecb->dte_alignment = cached->dte_alignment; 8603 } 8604 8605 return (ecb); 8606 } 8607 8608 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 8609 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 8610 dtrace_ecb_destroy(ecb); 8611 return (NULL); 8612 } 8613 } 8614 8615 dtrace_ecb_resize(ecb); 8616 8617 return (dtrace_ecb_create_cache = ecb); 8618 } 8619 8620 static int 8621 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 8622 { 8623 dtrace_ecb_t *ecb; 8624 dtrace_enabling_t *enab = arg; 8625 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 8626 8627 ASSERT(state != NULL); 8628 8629 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 8630 /* 8631 * This probe was created in a generation for which this 8632 * enabling has previously created ECBs; we don't want to 8633 * enable it again, so just kick out. 8634 */ 8635 return (DTRACE_MATCH_NEXT); 8636 } 8637 8638 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 8639 return (DTRACE_MATCH_DONE); 8640 8641 dtrace_ecb_enable(ecb); 8642 return (DTRACE_MATCH_NEXT); 8643 } 8644 8645 static dtrace_ecb_t * 8646 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 8647 { 8648 dtrace_ecb_t *ecb; 8649 8650 ASSERT(MUTEX_HELD(&dtrace_lock)); 8651 8652 if (id == 0 || id > state->dts_necbs) 8653 return (NULL); 8654 8655 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 8656 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 8657 8658 return (state->dts_ecbs[id - 1]); 8659 } 8660 8661 static dtrace_aggregation_t * 8662 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 8663 { 8664 dtrace_aggregation_t *agg; 8665 8666 ASSERT(MUTEX_HELD(&dtrace_lock)); 8667 8668 if (id == 0 || id > state->dts_naggregations) 8669 return (NULL); 8670 8671 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 8672 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 8673 agg->dtag_id == id); 8674 8675 return (state->dts_aggregations[id - 1]); 8676 } 8677 8678 /* 8679 * DTrace Buffer Functions 8680 * 8681 * The following functions manipulate DTrace buffers. Most of these functions 8682 * are called in the context of establishing or processing consumer state; 8683 * exceptions are explicitly noted. 8684 */ 8685 8686 /* 8687 * Note: called from cross call context. This function switches the two 8688 * buffers on a given CPU. The atomicity of this operation is assured by 8689 * disabling interrupts while the actual switch takes place; the disabling of 8690 * interrupts serializes the execution with any execution of dtrace_probe() on 8691 * the same CPU. 
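 *
 * (dtb_tomax is the active buffer and dtb_xamot -- "tomax" spelled
 * backwards -- its inactive twin.  Roughly:
 *
 *	before:	tomax = A (filling)	xamot = B (being consumed)
 *	after:	tomax = B (filling)	xamot = A (being consumed)
 *
 * with A's offset, drop and error counts moved into the dtb_xamot_*
 * fields for the consumer to read.)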
8692 */
8693 static void
8694 dtrace_buffer_switch(dtrace_buffer_t *buf)
8695 {
8696 caddr_t tomax = buf->dtb_tomax;
8697 caddr_t xamot = buf->dtb_xamot;
8698 dtrace_icookie_t cookie;
8699 
8700 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
8701 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
8702 
8703 cookie = dtrace_interrupt_disable();
8704 buf->dtb_tomax = xamot;
8705 buf->dtb_xamot = tomax;
8706 buf->dtb_xamot_drops = buf->dtb_drops;
8707 buf->dtb_xamot_offset = buf->dtb_offset;
8708 buf->dtb_xamot_errors = buf->dtb_errors;
8709 buf->dtb_xamot_flags = buf->dtb_flags;
8710 buf->dtb_offset = 0;
8711 buf->dtb_drops = 0;
8712 buf->dtb_errors = 0;
8713 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
8714 dtrace_interrupt_enable(cookie);
8715 }
8716 
8717 /*
8718 * Note: called from cross call context. This function activates a buffer
8719 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
8720 * is guaranteed by the disabling of interrupts.
8721 */
8722 static void
8723 dtrace_buffer_activate(dtrace_state_t *state)
8724 {
8725 dtrace_buffer_t *buf;
8726 dtrace_icookie_t cookie = dtrace_interrupt_disable();
8727 
8728 buf = &state->dts_buffer[CPU->cpu_id];
8729 
8730 if (buf->dtb_tomax != NULL) {
8731 /*
8732 * We might like to assert that the buffer is marked inactive,
8733 * but this isn't necessarily true: the buffer for the CPU
8734 * that processes the BEGIN probe has its buffer activated
8735 * manually. In this case, we take the (harmless) action of
8736 * re-clearing the INACTIVE bit.
8737 */
8738 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
8739 }
8740 
8741 dtrace_interrupt_enable(cookie);
8742 }
8743 
8744 static int
8745 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
8746 processorid_t cpu)
8747 {
8748 cpu_t *cp;
8749 dtrace_buffer_t *buf;
8750 
8751 ASSERT(MUTEX_HELD(&cpu_lock));
8752 ASSERT(MUTEX_HELD(&dtrace_lock));
8753 
8754 if (crgetuid(CRED()) != 0 && size > dtrace_nonroot_maxsize)
8755 return (EFBIG);
8756 
8757 cp = cpu_list;
8758 
8759 do {
8760 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
8761 continue;
8762 
8763 buf = &bufs[cp->cpu_id];
8764 
8765 /*
8766 * If there is already a buffer allocated for this CPU, it
8767 * is only possible that this is a DR event. In this case,
8768 * the buffer size must match our specified size.
8769 */
8770 if (buf->dtb_tomax != NULL) {
8771 ASSERT(buf->dtb_size == size);
8772 continue;
8773 }
8774 
8775 ASSERT(buf->dtb_xamot == NULL);
8776 
8777 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
8778 goto err;
8779 
8780 buf->dtb_size = size;
8781 buf->dtb_flags = flags;
8782 buf->dtb_offset = 0;
8783 buf->dtb_drops = 0;
8784 
8785 if (flags & DTRACEBUF_NOSWITCH)
8786 continue;
8787 
8788 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
8789 goto err;
8790 } while ((cp = cp->cpu_next) != cpu_list);
8791 
8792 return (0);
8793 
8794 err:
8795 cp = cpu_list;
8796 
8797 do {
8798 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
8799 continue;
8800 
8801 buf = &bufs[cp->cpu_id];
8802 
8803 if (buf->dtb_xamot != NULL) {
8804 ASSERT(buf->dtb_tomax != NULL);
8805 ASSERT(buf->dtb_size == size);
8806 kmem_free(buf->dtb_xamot, size);
8807 }
8808 
8809 if (buf->dtb_tomax != NULL) {
8810 ASSERT(buf->dtb_size == size);
8811 kmem_free(buf->dtb_tomax, size);
8812 }
8813 
8814 buf->dtb_tomax = NULL;
8815 buf->dtb_xamot = NULL;
8816 buf->dtb_size = 0;
8817 } while ((cp = cp->cpu_next) != cpu_list);
8818 
8819 return (ENOMEM);
8820 }
8821 
8822 /*
8823 * Note: called from probe context.
This function just increments the drop 8824 * count on a buffer. It has been made a function to allow for the 8825 * possibility of understanding the source of mysterious drop counts. (A 8826 * problem for which one may be particularly disappointed that DTrace cannot 8827 * be used to understand DTrace.) 8828 */ 8829 static void 8830 dtrace_buffer_drop(dtrace_buffer_t *buf) 8831 { 8832 buf->dtb_drops++; 8833 } 8834 8835 /* 8836 * Note: called from probe context. This function is called to reserve space 8837 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 8838 * mstate. Returns the new offset in the buffer, or a negative value if an 8839 * error has occurred. 8840 */ 8841 static intptr_t 8842 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 8843 dtrace_state_t *state, dtrace_mstate_t *mstate) 8844 { 8845 intptr_t offs = buf->dtb_offset, soffs; 8846 intptr_t woffs; 8847 caddr_t tomax; 8848 size_t total; 8849 8850 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 8851 return (-1); 8852 8853 if ((tomax = buf->dtb_tomax) == NULL) { 8854 dtrace_buffer_drop(buf); 8855 return (-1); 8856 } 8857 8858 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 8859 while (offs & (align - 1)) { 8860 /* 8861 * Assert that our alignment is off by a number which 8862 * is itself sizeof (uint32_t) aligned. 8863 */ 8864 ASSERT(!((align - (offs & (align - 1))) & 8865 (sizeof (uint32_t) - 1))); 8866 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 8867 offs += sizeof (uint32_t); 8868 } 8869 8870 if ((soffs = offs + needed) > buf->dtb_size) { 8871 dtrace_buffer_drop(buf); 8872 return (-1); 8873 } 8874 8875 if (mstate == NULL) 8876 return (offs); 8877 8878 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 8879 mstate->dtms_scratch_size = buf->dtb_size - soffs; 8880 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 8881 8882 return (offs); 8883 } 8884 8885 if (buf->dtb_flags & DTRACEBUF_FILL) { 8886 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 8887 (buf->dtb_flags & DTRACEBUF_FULL)) 8888 return (-1); 8889 goto out; 8890 } 8891 8892 total = needed + (offs & (align - 1)); 8893 8894 /* 8895 * For a ring buffer, life is quite a bit more complicated. Before 8896 * we can store any padding, we need to adjust our wrapping offset. 8897 * (If we've never before wrapped or we're not about to, no adjustment 8898 * is required.) 8899 */ 8900 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 8901 offs + total > buf->dtb_size) { 8902 woffs = buf->dtb_xamot_offset; 8903 8904 if (offs + total > buf->dtb_size) { 8905 /* 8906 * We can't fit in the end of the buffer. First, a 8907 * sanity check that we can fit in the buffer at all. 8908 */ 8909 if (total > buf->dtb_size) { 8910 dtrace_buffer_drop(buf); 8911 return (-1); 8912 } 8913 8914 /* 8915 * We're going to be storing at the top of the buffer, 8916 * so now we need to deal with the wrapped offset. We 8917 * only reset our wrapped offset to 0 if it is 8918 * currently greater than the current offset. If it 8919 * is less than the current offset, it is because a 8920 * previous allocation induced a wrap -- but the 8921 * allocation didn't subsequently take the space due 8922 * to an error or false predicate evaluation. In this 8923 * case, we'll just leave the wrapped offset alone: if 8924 * the wrapped offset hasn't been advanced far enough 8925 * for this allocation, it will be adjusted in the 8926 * lower loop. 
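 *
 * (While wrapped, the buffer looks roughly like this:
 *
 *	0 ......... offs ......... woffs ......... dtb_size
 *	|  newest data  |  reclaimable  |  oldest data  |
 *
 * where the wrapped offset woffs -- kept in dtb_xamot_offset, which
 * is otherwise unused for ring buffers -- marks the start of the
 * oldest intact record.)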
8927 */ 8928 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 8929 if (woffs >= offs) 8930 woffs = 0; 8931 } else { 8932 woffs = 0; 8933 } 8934 8935 /* 8936 * Now we know that we're going to be storing to the 8937 * top of the buffer and that there is room for us 8938 * there. We need to clear the buffer from the current 8939 * offset to the end (there may be old gunk there). 8940 */ 8941 while (offs < buf->dtb_size) 8942 tomax[offs++] = 0; 8943 8944 /* 8945 * We need to set our offset to zero. And because we 8946 * are wrapping, we need to set the bit indicating as 8947 * much. We can also adjust our needed space back 8948 * down to the space required by the ECB -- we know 8949 * that the top of the buffer is aligned. 8950 */ 8951 offs = 0; 8952 total = needed; 8953 buf->dtb_flags |= DTRACEBUF_WRAPPED; 8954 } else { 8955 /* 8956 * There is room for us in the buffer, so we simply 8957 * need to check the wrapped offset. 8958 */ 8959 if (woffs < offs) { 8960 /* 8961 * The wrapped offset is less than the offset. 8962 * This can happen if we allocated buffer space 8963 * that induced a wrap, but then we didn't 8964 * subsequently take the space due to an error 8965 * or false predicate evaluation. This is 8966 * okay; we know that _this_ allocation isn't 8967 * going to induce a wrap. We still can't 8968 * reset the wrapped offset to be zero, 8969 * however: the space may have been trashed in 8970 * the previous failed probe attempt. But at 8971 * least the wrapped offset doesn't need to 8972 * be adjusted at all... 8973 */ 8974 goto out; 8975 } 8976 } 8977 8978 while (offs + total > woffs) { 8979 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 8980 size_t size; 8981 8982 if (epid == DTRACE_EPIDNONE) { 8983 size = sizeof (uint32_t); 8984 } else { 8985 ASSERT(epid <= state->dts_necbs); 8986 ASSERT(state->dts_ecbs[epid - 1] != NULL); 8987 8988 size = state->dts_ecbs[epid - 1]->dte_size; 8989 } 8990 8991 ASSERT(woffs + size <= buf->dtb_size); 8992 ASSERT(size != 0); 8993 8994 if (woffs + size == buf->dtb_size) { 8995 /* 8996 * We've reached the end of the buffer; we want 8997 * to set the wrapped offset to 0 and break 8998 * out. However, if the offs is 0, then we're 8999 * in a strange edge-condition: the amount of 9000 * space that we want to reserve plus the size 9001 * of the record that we're overwriting is 9002 * greater than the size of the buffer. This 9003 * is problematic because if we reserve the 9004 * space but subsequently don't consume it (due 9005 * to a failed predicate or error) the wrapped 9006 * offset will be 0 -- yet the EPID at offset 0 9007 * will not be committed. This situation is 9008 * relatively easy to deal with: if we're in 9009 * this case, the buffer is indistinguishable 9010 * from one that hasn't wrapped; we need only 9011 * finish the job by clearing the wrapped bit, 9012 * explicitly setting the offset to be 0, and 9013 * zero'ing out the old data in the buffer. 9014 */ 9015 if (offs == 0) { 9016 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9017 buf->dtb_offset = 0; 9018 woffs = total; 9019 9020 while (woffs < buf->dtb_size) 9021 tomax[woffs++] = 0; 9022 } 9023 9024 woffs = 0; 9025 break; 9026 } 9027 9028 woffs += size; 9029 } 9030 9031 /* 9032 * We have a wrapped offset. It may be that the wrapped offset 9033 * has become zero -- that's okay. 9034 */ 9035 buf->dtb_xamot_offset = woffs; 9036 } 9037 9038 out: 9039 /* 9040 * Now we can plow the buffer with any necessary padding. 
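 *
 * (Padding is a run of DTRACE_EPIDNONE words: each is a
 * sizeof (uint32_t) pseudo-record that consumers -- and the
 * reclaiming loop above -- simply step over until a real EPID is
 * found.)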
9041 */ 9042 while (offs & (align - 1)) { 9043 /* 9044 * Assert that our alignment is off by a number which 9045 * is itself sizeof (uint32_t) aligned. 9046 */ 9047 ASSERT(!((align - (offs & (align - 1))) & 9048 (sizeof (uint32_t) - 1))); 9049 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9050 offs += sizeof (uint32_t); 9051 } 9052 9053 if (buf->dtb_flags & DTRACEBUF_FILL) { 9054 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9055 buf->dtb_flags |= DTRACEBUF_FULL; 9056 return (-1); 9057 } 9058 } 9059 9060 if (mstate == NULL) 9061 return (offs); 9062 9063 /* 9064 * For ring buffers and fill buffers, the scratch space is always 9065 * the inactive buffer. 9066 */ 9067 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9068 mstate->dtms_scratch_size = buf->dtb_size; 9069 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9070 9071 return (offs); 9072 } 9073 9074 static void 9075 dtrace_buffer_polish(dtrace_buffer_t *buf) 9076 { 9077 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9078 ASSERT(MUTEX_HELD(&dtrace_lock)); 9079 9080 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9081 return; 9082 9083 /* 9084 * We need to polish the ring buffer. There are three cases: 9085 * 9086 * - The first (and presumably most common) is that there is no gap 9087 * between the buffer offset and the wrapped offset. In this case, 9088 * there is nothing in the buffer that isn't valid data; we can 9089 * mark the buffer as polished and return. 9090 * 9091 * - The second (less common than the first but still more common 9092 * than the third) is that there is a gap between the buffer offset 9093 * and the wrapped offset, and the wrapped offset is larger than the 9094 * buffer offset. This can happen because of an alignment issue, or 9095 * can happen because of a call to dtrace_buffer_reserve() that 9096 * didn't subsequently consume the buffer space. In this case, 9097 * we need to zero the data from the buffer offset to the wrapped 9098 * offset. 9099 * 9100 * - The third (and least common) is that there is a gap between the 9101 * buffer offset and the wrapped offset, but the wrapped offset is 9102 * _less_ than the buffer offset. This can only happen because a 9103 * call to dtrace_buffer_reserve() induced a wrap, but the space 9104 * was not subsequently consumed. In this case, we need to zero the 9105 * space from the offset to the end of the buffer _and_ from the 9106 * top of the buffer to the wrapped offset. 
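 *
 * (The first case requires no work at all, which is why the code
 * below only handles the two inequalities.)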
9107 */ 9108 if (buf->dtb_offset < buf->dtb_xamot_offset) { 9109 bzero(buf->dtb_tomax + buf->dtb_offset, 9110 buf->dtb_xamot_offset - buf->dtb_offset); 9111 } 9112 9113 if (buf->dtb_offset > buf->dtb_xamot_offset) { 9114 bzero(buf->dtb_tomax + buf->dtb_offset, 9115 buf->dtb_size - buf->dtb_offset); 9116 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 9117 } 9118 } 9119 9120 static void 9121 dtrace_buffer_free(dtrace_buffer_t *bufs) 9122 { 9123 int i; 9124 9125 for (i = 0; i < NCPU; i++) { 9126 dtrace_buffer_t *buf = &bufs[i]; 9127 9128 if (buf->dtb_tomax == NULL) { 9129 ASSERT(buf->dtb_xamot == NULL); 9130 ASSERT(buf->dtb_size == 0); 9131 continue; 9132 } 9133 9134 if (buf->dtb_xamot != NULL) { 9135 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9136 kmem_free(buf->dtb_xamot, buf->dtb_size); 9137 } 9138 9139 kmem_free(buf->dtb_tomax, buf->dtb_size); 9140 buf->dtb_size = 0; 9141 buf->dtb_tomax = NULL; 9142 buf->dtb_xamot = NULL; 9143 } 9144 } 9145 9146 /* 9147 * DTrace Enabling Functions 9148 */ 9149 static dtrace_enabling_t * 9150 dtrace_enabling_create(dtrace_vstate_t *vstate) 9151 { 9152 dtrace_enabling_t *enab; 9153 9154 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 9155 enab->dten_vstate = vstate; 9156 9157 return (enab); 9158 } 9159 9160 static void 9161 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 9162 { 9163 dtrace_ecbdesc_t **ndesc; 9164 size_t osize, nsize; 9165 9166 /* 9167 * We can't add to enablings after we've enabled them, or after we've 9168 * retained them. 9169 */ 9170 ASSERT(enab->dten_probegen == 0); 9171 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9172 9173 if (enab->dten_ndesc < enab->dten_maxdesc) { 9174 enab->dten_desc[enab->dten_ndesc++] = ecb; 9175 return; 9176 } 9177 9178 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9179 9180 if (enab->dten_maxdesc == 0) { 9181 enab->dten_maxdesc = 1; 9182 } else { 9183 enab->dten_maxdesc <<= 1; 9184 } 9185 9186 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 9187 9188 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9189 ndesc = kmem_zalloc(nsize, KM_SLEEP); 9190 bcopy(enab->dten_desc, ndesc, osize); 9191 kmem_free(enab->dten_desc, osize); 9192 9193 enab->dten_desc = ndesc; 9194 enab->dten_desc[enab->dten_ndesc++] = ecb; 9195 } 9196 9197 static void 9198 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 9199 dtrace_probedesc_t *pd) 9200 { 9201 dtrace_ecbdesc_t *new; 9202 dtrace_predicate_t *pred; 9203 dtrace_actdesc_t *act; 9204 9205 /* 9206 * We're going to create a new ECB description that matches the 9207 * specified ECB in every way, but has the specified probe description. 
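 *
 * (Nothing is deep-copied: the new description shares the predicate
 * and the action list with the old one, with dtrace_predicate_hold()
 * and dtrace_actdesc_hold() below keeping the reference counts
 * honest.  This is what makes dtrace_enabling_replicate() cheap.)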
9208 */ 9209 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 9210 9211 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 9212 dtrace_predicate_hold(pred); 9213 9214 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 9215 dtrace_actdesc_hold(act); 9216 9217 new->dted_action = ecb->dted_action; 9218 new->dted_pred = ecb->dted_pred; 9219 new->dted_probe = *pd; 9220 new->dted_uarg = ecb->dted_uarg; 9221 9222 dtrace_enabling_add(enab, new); 9223 } 9224 9225 static void 9226 dtrace_enabling_dump(dtrace_enabling_t *enab) 9227 { 9228 int i; 9229 9230 for (i = 0; i < enab->dten_ndesc; i++) { 9231 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 9232 9233 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 9234 desc->dtpd_provider, desc->dtpd_mod, 9235 desc->dtpd_func, desc->dtpd_name); 9236 } 9237 } 9238 9239 static void 9240 dtrace_enabling_destroy(dtrace_enabling_t *enab) 9241 { 9242 int i; 9243 dtrace_ecbdesc_t *ep; 9244 dtrace_vstate_t *vstate = enab->dten_vstate; 9245 9246 ASSERT(MUTEX_HELD(&dtrace_lock)); 9247 9248 for (i = 0; i < enab->dten_ndesc; i++) { 9249 dtrace_actdesc_t *act, *next; 9250 dtrace_predicate_t *pred; 9251 9252 ep = enab->dten_desc[i]; 9253 9254 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 9255 dtrace_predicate_release(pred, vstate); 9256 9257 for (act = ep->dted_action; act != NULL; act = next) { 9258 next = act->dtad_next; 9259 dtrace_actdesc_release(act, vstate); 9260 } 9261 9262 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 9263 } 9264 9265 kmem_free(enab->dten_desc, 9266 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 9267 9268 /* 9269 * If this was a retained enabling, decrement the dts_nretained count 9270 * and take it off of the dtrace_retained list. 9271 */ 9272 if (enab->dten_prev != NULL || enab->dten_next != NULL || 9273 dtrace_retained == enab) { 9274 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9275 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 9276 enab->dten_vstate->dtvs_state->dts_nretained--; 9277 } 9278 9279 if (enab->dten_prev == NULL) { 9280 if (dtrace_retained == enab) { 9281 dtrace_retained = enab->dten_next; 9282 9283 if (dtrace_retained != NULL) 9284 dtrace_retained->dten_prev = NULL; 9285 } 9286 } else { 9287 ASSERT(enab != dtrace_retained); 9288 ASSERT(dtrace_retained != NULL); 9289 enab->dten_prev->dten_next = enab->dten_next; 9290 } 9291 9292 if (enab->dten_next != NULL) { 9293 ASSERT(dtrace_retained != NULL); 9294 enab->dten_next->dten_prev = enab->dten_prev; 9295 } 9296 9297 kmem_free(enab, sizeof (dtrace_enabling_t)); 9298 } 9299 9300 static int 9301 dtrace_enabling_retain(dtrace_enabling_t *enab) 9302 { 9303 dtrace_state_t *state; 9304 9305 ASSERT(MUTEX_HELD(&dtrace_lock)); 9306 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9307 ASSERT(enab->dten_vstate != NULL); 9308 9309 state = enab->dten_vstate->dtvs_state; 9310 ASSERT(state != NULL); 9311 9312 /* 9313 * We only allow each state to retain dtrace_retain_max enablings. 
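 *
 * (An attempt to retain beyond that limit fails with ENOSPC; on
 * success, the enabling is pushed onto the head of the global
 * dtrace_retained list, so that list runs newest to oldest.)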
9314 */ 9315 if (state->dts_nretained >= dtrace_retain_max) 9316 return (ENOSPC); 9317 9318 state->dts_nretained++; 9319 9320 if (dtrace_retained == NULL) { 9321 dtrace_retained = enab; 9322 return (0); 9323 } 9324 9325 enab->dten_next = dtrace_retained; 9326 dtrace_retained->dten_prev = enab; 9327 dtrace_retained = enab; 9328 9329 return (0); 9330 } 9331 9332 static int 9333 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 9334 dtrace_probedesc_t *create) 9335 { 9336 dtrace_enabling_t *new, *enab; 9337 int found = 0, err = ENOENT; 9338 9339 ASSERT(MUTEX_HELD(&dtrace_lock)); 9340 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 9341 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 9342 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 9343 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 9344 9345 new = dtrace_enabling_create(&state->dts_vstate); 9346 9347 /* 9348 * Iterate over all retained enablings, looking for enablings that 9349 * match the specified state. 9350 */ 9351 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9352 int i; 9353 9354 /* 9355 * dtvs_state can only be NULL for helper enablings -- and 9356 * helper enablings can't be retained. 9357 */ 9358 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9359 9360 if (enab->dten_vstate->dtvs_state != state) 9361 continue; 9362 9363 /* 9364 * Now iterate over each probe description; we're looking for 9365 * an exact match to the specified probe description. 9366 */ 9367 for (i = 0; i < enab->dten_ndesc; i++) { 9368 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9369 dtrace_probedesc_t *pd = &ep->dted_probe; 9370 9371 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 9372 continue; 9373 9374 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 9375 continue; 9376 9377 if (strcmp(pd->dtpd_func, match->dtpd_func)) 9378 continue; 9379 9380 if (strcmp(pd->dtpd_name, match->dtpd_name)) 9381 continue; 9382 9383 /* 9384 * We have a winning probe! Add it to our growing 9385 * enabling. 9386 */ 9387 found = 1; 9388 dtrace_enabling_addlike(new, ep, create); 9389 } 9390 } 9391 9392 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 9393 dtrace_enabling_destroy(new); 9394 return (err); 9395 } 9396 9397 return (0); 9398 } 9399 9400 static void 9401 dtrace_enabling_retract(dtrace_state_t *state) 9402 { 9403 dtrace_enabling_t *enab, *next; 9404 9405 ASSERT(MUTEX_HELD(&dtrace_lock)); 9406 9407 /* 9408 * Iterate over all retained enablings, destroy the enablings retained 9409 * for the specified state. 9410 */ 9411 for (enab = dtrace_retained; enab != NULL; enab = next) { 9412 next = enab->dten_next; 9413 9414 /* 9415 * dtvs_state can only be NULL for helper enablings -- and 9416 * helper enablings can't be retained. 
9417 */
9418 ASSERT(enab->dten_vstate->dtvs_state != NULL);
9419 
9420 if (enab->dten_vstate->dtvs_state == state) {
9421 ASSERT(state->dts_nretained > 0);
9422 dtrace_enabling_destroy(enab);
9423 }
9424 }
9425 
9426 ASSERT(state->dts_nretained == 0);
9427 }
9428 
9429 static int
9430 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
9431 {
9432 int i = 0;
9433 int matched = 0;
9434 
9435 ASSERT(MUTEX_HELD(&cpu_lock));
9436 ASSERT(MUTEX_HELD(&dtrace_lock));
9437 
9438 for (i = 0; i < enab->dten_ndesc; i++) {
9439 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
9440 
9441 enab->dten_current = ep;
9442 enab->dten_error = 0;
9443 
9444 matched += dtrace_probe_enable(&ep->dted_probe, enab);
9445 
9446 if (enab->dten_error != 0) {
9447 /*
9448 * If we get an error half-way through enabling the
9449 * probes, we kick out -- perhaps with some number of
9450 * them enabled. Leaving enabled probes enabled may
9451 * be slightly confusing for user-level, but we expect
9452 * that no one will attempt to actually drive on in
9453 * the face of such errors. If this is an anonymous
9454 * enabling (indicated with a NULL nmatched pointer),
9455 * we cmn_err() a message. We aren't expecting to
9456 * get such an error -- to the extent that such an
9457 * error can exist at all, it would be the result of
9458 * corrupted DOF in the driver properties.
9459 */
9460 if (nmatched == NULL) {
9461 cmn_err(CE_WARN, "dtrace_enabling_match() "
9462 "error on %p: %d", (void *)ep,
9463 enab->dten_error);
9464 }
9465 
9466 return (enab->dten_error);
9467 }
9468 }
9469 
9470 enab->dten_probegen = dtrace_probegen;
9471 if (nmatched != NULL)
9472 *nmatched = matched;
9473 
9474 return (0);
9475 }
9476 
9477 static void
9478 dtrace_enabling_matchall(void)
9479 {
9480 dtrace_enabling_t *enab;
9481 
9482 mutex_enter(&cpu_lock);
9483 mutex_enter(&dtrace_lock);
9484 
9485 /*
9486 * Because we can be called after dtrace_detach() has been called, we
9487 * cannot assert that there are retained enablings. We can safely
9488 * load from dtrace_retained, however: the taskq_destroy() at the
9489 * end of dtrace_detach() will block pending our completion.
9490 */
9491 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next)
9492 (void) dtrace_enabling_match(enab, NULL);
9493 
9494 mutex_exit(&dtrace_lock);
9495 mutex_exit(&cpu_lock);
9496 }
9497 
9498 static int
9499 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched)
9500 {
9501 dtrace_enabling_t *enab;
9502 int matched, total = 0, err;
9503 
9504 ASSERT(MUTEX_HELD(&cpu_lock));
9505 ASSERT(MUTEX_HELD(&dtrace_lock));
9506 
9507 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
9508 ASSERT(enab->dten_vstate->dtvs_state != NULL);
9509 
9510 if (enab->dten_vstate->dtvs_state != state)
9511 continue;
9512 
9513 if ((err = dtrace_enabling_match(enab, &matched)) != 0)
9514 return (err);
9515 
9516 total += matched;
9517 }
9518 
9519 if (nmatched != NULL)
9520 *nmatched = total;
9521 
9522 return (0);
9523 }
9524 
9525 /*
9526 * If an enabling is to be enabled without having matched probes (that is, if
9527 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
9528 * enabling must be _primed_ by creating an ECB for every ECB description.
9529 * This must be done to assure that we know the number of speculations, the
9530 * number of aggregations, the minimum buffer size needed, etc. before we
9531 * transition out of DTRACE_ACTIVITY_INACTIVE.
To do this without actually
9532 * enabling any probes, we create ECBs for every ECB description, but with a
9533 * NULL probe -- which is exactly what this function does.
9534 */
9535 static void
9536 dtrace_enabling_prime(dtrace_state_t *state)
9537 {
9538 dtrace_enabling_t *enab;
9539 int i;
9540 
9541 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
9542 ASSERT(enab->dten_vstate->dtvs_state != NULL);
9543 
9544 if (enab->dten_vstate->dtvs_state != state)
9545 continue;
9546 
9547 /*
9548 * We don't want to prime an enabling more than once, lest
9549 * we allow a malicious user to induce resource exhaustion.
9550 * (The ECBs that result from priming an enabling aren't
9551 * leaked -- but they also aren't deallocated until the
9552 * consumer state is destroyed.)
9553 */
9554 if (enab->dten_primed)
9555 continue;
9556 
9557 for (i = 0; i < enab->dten_ndesc; i++) {
9558 enab->dten_current = enab->dten_desc[i];
9559 (void) dtrace_probe_enable(NULL, enab);
9560 }
9561 
9562 enab->dten_primed = 1;
9563 }
9564 }
9565 
9566 /*
9567 * Called to indicate that probes should be provided due to retained
9568 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
9569 * must take an initial lap through the enabling, calling the dtps_provide()
9570 * entry point explicitly, to allow for autocreated probes.
9571 */
9572 static void
9573 dtrace_enabling_provide(dtrace_provider_t *prv)
9574 {
9575 int i, all = 0;
9576 dtrace_probedesc_t desc;
9577 
9578 ASSERT(MUTEX_HELD(&dtrace_lock));
9579 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9580 
9581 if (prv == NULL) {
9582 all = 1;
9583 prv = dtrace_provider;
9584 }
9585 
9586 do {
9587 dtrace_enabling_t *enab = dtrace_retained;
9588 void *parg = prv->dtpv_arg;
9589 
9590 for (; enab != NULL; enab = enab->dten_next) {
9591 for (i = 0; i < enab->dten_ndesc; i++) {
9592 desc = enab->dten_desc[i]->dted_probe;
9593 mutex_exit(&dtrace_lock);
9594 prv->dtpv_pops.dtps_provide(parg, &desc);
9595 mutex_enter(&dtrace_lock);
9596 }
9597 }
9598 } while (all && (prv = prv->dtpv_next) != NULL);
9599 
9600 mutex_exit(&dtrace_lock);
9601 dtrace_probe_provide(NULL, all ? NULL : prv);
9602 mutex_enter(&dtrace_lock);
9603 }
9604 
9605 /*
9606 * DTrace DOF Functions
9607 */
9608 /*ARGSUSED*/
9609 static void
9610 dtrace_dof_error(dof_hdr_t *dof, const char *str)
9611 {
9612 if (dtrace_err_verbose)
9613 cmn_err(CE_WARN, "failed to process DOF: %s", str);
9614 
9615 #ifdef DTRACE_ERRDEBUG
9616 dtrace_errdebug(str);
9617 #endif
9618 }
9619 
9620 /*
9621 * Create DOF out of a currently enabled state. Right now, we only create
9622 * DOF containing the run-time options -- but this could be expanded to create
9623 * complete DOF representing the enabled state.
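 *
 * (The generated image is as small as DOF gets:
 *
 *	dof_hdr_t			header; declares one section
 *	dof_sec_t			single DOF_SECT_OPTDESC section
 *	dof_optdesc_t[DTRACEOPT_MAX]	one entry per option
 *
 * with the option values copied from dts_options, exactly as the
 * code below lays it down.)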
9624 */
9625 static dof_hdr_t *
9626 dtrace_dof_create(dtrace_state_t *state)
9627 {
9628 dof_hdr_t *dof;
9629 dof_sec_t *sec;
9630 dof_optdesc_t *opt;
9631 int i, len = sizeof (dof_hdr_t) +
9632 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
9633 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
9634 
9635 ASSERT(MUTEX_HELD(&dtrace_lock));
9636 
9637 dof = kmem_zalloc(len, KM_SLEEP);
9638 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
9639 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
9640 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
9641 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
9642 
9643 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
9644 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
9645 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION_1;
9646 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
9647 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
9648 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
9649 
9650 dof->dofh_flags = 0;
9651 dof->dofh_hdrsize = sizeof (dof_hdr_t);
9652 dof->dofh_secsize = sizeof (dof_sec_t);
9653 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
9654 dof->dofh_secoff = sizeof (dof_hdr_t);
9655 dof->dofh_loadsz = len;
9656 dof->dofh_filesz = len;
9657 dof->dofh_pad = 0;
9658 
9659 /*
9660 * Fill in the option section header...
9661 */
9662 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
9663 sec->dofs_type = DOF_SECT_OPTDESC;
9664 sec->dofs_align = sizeof (uint64_t);
9665 sec->dofs_flags = DOF_SECF_LOAD;
9666 sec->dofs_entsize = sizeof (dof_optdesc_t);
9667 
9668 opt = (dof_optdesc_t *)((uintptr_t)sec +
9669 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
9670 
9671 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
9672 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
9673 
9674 for (i = 0; i < DTRACEOPT_MAX; i++) {
9675 opt[i].dofo_option = i;
9676 opt[i].dofo_strtab = DOF_SECIDX_NONE;
9677 opt[i].dofo_value = state->dts_options[i];
9678 }
9679 
9680 return (dof);
9681 }
9682 
9683 static dof_hdr_t *
9684 dtrace_dof_copyin(uintptr_t uarg, int *errp)
9685 {
9686 dof_hdr_t hdr, *dof;
9687 
9688 ASSERT(!MUTEX_HELD(&dtrace_lock));
9689 
9690 /*
9691 * First, we're going to copyin() the sizeof (dof_hdr_t).
9692 */
9693 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
9694 dtrace_dof_error(NULL, "failed to copyin DOF header");
9695 *errp = EFAULT;
9696 return (NULL);
9697 }
9698 
9699 /*
9700 * Now we'll allocate the entire DOF and copy it in -- provided
9701 * that the length isn't outrageous.
9702 */
9703 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
9704 dtrace_dof_error(&hdr, "load size exceeds maximum");
9705 *errp = E2BIG;
9706 return (NULL);
9707 }
9708 
9709 if (hdr.dofh_loadsz < sizeof (hdr)) {
9710 dtrace_dof_error(&hdr, "invalid load size");
9711 *errp = EINVAL;
9712 return (NULL);
9713 }
9714 
9715 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
9716 
9717 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
9718 kmem_free(dof, hdr.dofh_loadsz);
9719 *errp = EFAULT;
9720 return (NULL);
9721 }
9722 
9723 return (dof);
9724 }
9725 
9726 static dof_hdr_t *
9727 dtrace_dof_property(const char *name)
9728 {
9729 uchar_t *buf;
9730 uint64_t loadsz;
9731 unsigned int len, i;
9732 dof_hdr_t *dof;
9733 
9734 /*
9735 * Unfortunately, arrays of values in .conf files are always (and
9736 * only) interpreted as integer arrays. We must read our DOF
9737 * as an integer array, and then squeeze it into a byte array.
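 *
 * (For example, anonymous enabling DOF stored in dtrace.conf as a
 * "dof-data-0" property comes back from ddi_prop_lookup_int_array()
 * as one int per original byte; the loop below narrows each int back
 * into a byte, in place, before the image is copied into its own
 * buffer.)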
9738 */ 9739 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 9740 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 9741 return (NULL); 9742 9743 for (i = 0; i < len; i++) 9744 buf[i] = (uchar_t)(((int *)buf)[i]); 9745 9746 if (len < sizeof (dof_hdr_t)) { 9747 ddi_prop_free(buf); 9748 dtrace_dof_error(NULL, "truncated header"); 9749 return (NULL); 9750 } 9751 9752 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 9753 ddi_prop_free(buf); 9754 dtrace_dof_error(NULL, "truncated DOF"); 9755 return (NULL); 9756 } 9757 9758 if (loadsz >= dtrace_dof_maxsize) { 9759 ddi_prop_free(buf); 9760 dtrace_dof_error(NULL, "oversized DOF"); 9761 return (NULL); 9762 } 9763 9764 dof = kmem_alloc(loadsz, KM_SLEEP); 9765 bcopy(buf, dof, loadsz); 9766 ddi_prop_free(buf); 9767 9768 return (dof); 9769 } 9770 9771 static void 9772 dtrace_dof_destroy(dof_hdr_t *dof) 9773 { 9774 kmem_free(dof, dof->dofh_loadsz); 9775 } 9776 9777 /* 9778 * Return the dof_sec_t pointer corresponding to a given section index. If the 9779 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 9780 * a type other than DOF_SECT_NONE is specified, the header is checked against 9781 * this type and NULL is returned if the types do not match. 9782 */ 9783 static dof_sec_t * 9784 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 9785 { 9786 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 9787 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 9788 9789 if (i >= dof->dofh_secnum) { 9790 dtrace_dof_error(dof, "referenced section index is invalid"); 9791 return (NULL); 9792 } 9793 9794 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 9795 dtrace_dof_error(dof, "referenced section is not loadable"); 9796 return (NULL); 9797 } 9798 9799 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 9800 dtrace_dof_error(dof, "referenced section is the wrong type"); 9801 return (NULL); 9802 } 9803 9804 return (sec); 9805 } 9806 9807 static dtrace_probedesc_t * 9808 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 9809 { 9810 dof_probedesc_t *probe; 9811 dof_sec_t *strtab; 9812 uintptr_t daddr = (uintptr_t)dof; 9813 uintptr_t str; 9814 size_t size; 9815 9816 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 9817 dtrace_dof_error(dof, "invalid probe section"); 9818 return (NULL); 9819 } 9820 9821 if (sec->dofs_align != sizeof (dof_secidx_t)) { 9822 dtrace_dof_error(dof, "bad alignment in probe description"); 9823 return (NULL); 9824 } 9825 9826 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 9827 dtrace_dof_error(dof, "truncated probe description"); 9828 return (NULL); 9829 } 9830 9831 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 9832 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 9833 9834 if (strtab == NULL) 9835 return (NULL); 9836 9837 str = daddr + strtab->dofs_offset; 9838 size = strtab->dofs_size; 9839 9840 if (probe->dofp_provider >= strtab->dofs_size) { 9841 dtrace_dof_error(dof, "corrupt probe provider"); 9842 return (NULL); 9843 } 9844 9845 (void) strncpy(desc->dtpd_provider, 9846 (char *)(str + probe->dofp_provider), 9847 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 9848 9849 if (probe->dofp_mod >= strtab->dofs_size) { 9850 dtrace_dof_error(dof, "corrupt probe module"); 9851 return (NULL); 9852 } 9853 9854 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 9855 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 9856 9857 if (probe->dofp_func >= strtab->dofs_size) { 9858 
dtrace_dof_error(dof, "corrupt probe function");
9859 return (NULL);
9860 }
9861 
9862 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
9863 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
9864 
9865 if (probe->dofp_name >= strtab->dofs_size) {
9866 dtrace_dof_error(dof, "corrupt probe name");
9867 return (NULL);
9868 }
9869 
9870 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
9871 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
9872 
9873 return (desc);
9874 }
9875 
9876 static dtrace_difo_t *
9877 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
9878 cred_t *cr)
9879 {
9880 dtrace_difo_t *dp;
9881 size_t ttl = 0;
9882 dof_difohdr_t *dofd;
9883 uintptr_t daddr = (uintptr_t)dof;
9884 size_t max = dtrace_difo_maxsize;
9885 int i, l, n;
9886 
9887 static const struct {
9888 int section;
9889 int bufoffs;
9890 int lenoffs;
9891 int entsize;
9892 int align;
9893 const char *msg;
9894 } difo[] = {
9895 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
9896 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
9897 sizeof (dif_instr_t), "multiple DIF sections" },
9898 
9899 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
9900 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
9901 sizeof (uint64_t), "multiple integer tables" },
9902 
9903 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
9904 offsetof(dtrace_difo_t, dtdo_strlen), 0,
9905 sizeof (char), "multiple string tables" },
9906 
9907 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
9908 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
9909 sizeof (uint_t), "multiple variable tables" },
9910 
9911 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
9912 };
9913 
9914 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
9915 dtrace_dof_error(dof, "invalid DIFO header section");
9916 return (NULL);
9917 }
9918 
9919 if (sec->dofs_align != sizeof (dof_secidx_t)) {
9920 dtrace_dof_error(dof, "bad alignment in DIFO header");
9921 return (NULL);
9922 }
9923 
9924 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
9925 sec->dofs_size % sizeof (dof_secidx_t)) {
9926 dtrace_dof_error(dof, "bad size in DIFO header");
9927 return (NULL);
9928 }
9929 
9930 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
9931 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
9932 
9933 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9934 dp->dtdo_rtype = dofd->dofd_rtype;
9935 
9936 for (l = 0; l < n; l++) {
9937 dof_sec_t *subsec;
9938 void **bufp;
9939 uint32_t *lenp;
9940 
9941 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
9942 dofd->dofd_links[l])) == NULL)
9943 goto err; /* invalid section link */
9944 
9945 if (ttl + subsec->dofs_size > max) {
9946 dtrace_dof_error(dof, "exceeds maximum size");
9947 goto err;
9948 }
9949 
9950 ttl += subsec->dofs_size;
9951 
9952 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
9953 if (subsec->dofs_type != difo[i].section)
9954 continue;
9955 
9956 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
9957 dtrace_dof_error(dof, "section not loaded");
9958 goto err;
9959 }
9960 
9961 if (subsec->dofs_align != difo[i].align) {
9962 dtrace_dof_error(dof, "bad alignment");
9963 goto err;
9964 }
9965 
9966 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
9967 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
9968 
9969 if (*bufp != NULL) {
9970 dtrace_dof_error(dof, difo[i].msg);
9971 goto err;
9972 }
9973 
9974 if (difo[i].entsize != subsec->dofs_entsize) {
9975 dtrace_dof_error(dof, "entry size mismatch");
9976 goto err;
9977 }
9978 
9979 if (subsec->dofs_entsize != 0 &&
9980 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
9981 dtrace_dof_error(dof, "corrupt entry size");
9982 goto err;
9983 }
9984 
9985 *lenp = subsec->dofs_size;
9986 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
9987 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
9988 *bufp, subsec->dofs_size);
9989 
9990 if (subsec->dofs_entsize != 0)
9991 *lenp /= subsec->dofs_entsize;
9992 
9993 break;
9994 }
9995 
9996 /*
9997 * If we encounter a loadable DIFO sub-section that is not
9998 * known to us, assume this is a broken program and fail.
9999 */
10000 if (difo[i].section == DOF_SECT_NONE &&
10001 (subsec->dofs_flags & DOF_SECF_LOAD)) {
10002 dtrace_dof_error(dof, "unrecognized DIFO subsection");
10003 goto err;
10004 }
10005 }
10006 
10007 if (dp->dtdo_buf == NULL) {
10008 /*
10009 * We can't have a DIF object without DIF text.
10010 */
10011 dtrace_dof_error(dof, "missing DIF text");
10012 goto err;
10013 }
10014 
10015 /*
10016 * Before we validate the DIF object, run through the variable table
10017 * looking for the strings -- if any of their sizes are zero, we'll set
10018 * their size to be the system-wide default string size. Note that
10019 * this should _not_ happen if the "strsize" option has been set --
10020 * in this case, the compiler should have set the size to reflect the
10021 * setting of the option.
10022 */
10023 for (i = 0; i < dp->dtdo_varlen; i++) {
10024 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10025 dtrace_diftype_t *t = &v->dtdv_type;
10026 
10027 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
10028 continue;
10029 
10030 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
10031 t->dtdt_size = dtrace_strsize_default;
10032 }
10033 
10034 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
10035 goto err;
10036 
10037 dtrace_difo_init(dp, vstate);
10038 return (dp);
10039 
10040 err:
10041 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10042 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10043 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10044 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10045 
10046 kmem_free(dp, sizeof (dtrace_difo_t));
10047 return (NULL);
10048 }
10049 
10050 static dtrace_predicate_t *
10051 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
10052 cred_t *cr)
10053 {
10054 dtrace_difo_t *dp;
10055 
10056 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
10057 return (NULL);
10058 
10059 return (dtrace_predicate_create(dp));
10060 }
10061 
10062 static dtrace_actdesc_t *
10063 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
10064 cred_t *cr)
10065 {
10066 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
10067 dof_actdesc_t *desc;
10068 dof_sec_t *difosec;
10069 size_t offs;
10070 uintptr_t daddr = (uintptr_t)dof;
10071 uint64_t arg;
10072 dtrace_actkind_t kind;
10073 
10074 if (sec->dofs_type != DOF_SECT_ACTDESC) {
10075 dtrace_dof_error(dof, "invalid action section");
10076 return (NULL);
10077 }
10078 
10079 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
10080 dtrace_dof_error(dof, "truncated action description");
10081 return (NULL);
10082 }
10083 
10084 if (sec->dofs_align != sizeof (uint64_t)) {
10085 dtrace_dof_error(dof, "bad alignment in action description");
10086 return (NULL);
10087 }
10088 
10089 if (sec->dofs_size < sec->dofs_entsize) {
10090 dtrace_dof_error(dof, "section entry size exceeds total size");
10091 return (NULL);
10092 }
10093 
10094 if
(sec->dofs_entsize != sizeof (dof_actdesc_t)) { 10095 dtrace_dof_error(dof, "bad entry size in action description"); 10096 return (NULL); 10097 } 10098 10099 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10100 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10101 return (NULL); 10102 } 10103 10104 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 10105 desc = (dof_actdesc_t *)(daddr + 10106 (uintptr_t)sec->dofs_offset + offs); 10107 kind = (dtrace_actkind_t)desc->dofa_kind; 10108 10109 if (DTRACEACT_ISPRINTFLIKE(kind) && 10110 (kind != DTRACEACT_PRINTA || 10111 desc->dofa_strtab != DOF_SECIDX_NONE)) { 10112 dof_sec_t *strtab; 10113 char *str, *fmt; 10114 uint64_t i; 10115 10116 /* 10117 * printf()-like actions must have a format string. 10118 */ 10119 if ((strtab = dtrace_dof_sect(dof, 10120 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 10121 goto err; 10122 10123 str = (char *)((uintptr_t)dof + 10124 (uintptr_t)strtab->dofs_offset); 10125 10126 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 10127 if (str[i] == '\0') 10128 break; 10129 } 10130 10131 if (i >= strtab->dofs_size) { 10132 dtrace_dof_error(dof, "bogus format string"); 10133 goto err; 10134 } 10135 10136 if (i == desc->dofa_arg) { 10137 dtrace_dof_error(dof, "empty format string"); 10138 goto err; 10139 } 10140 10141 i -= desc->dofa_arg; 10142 fmt = kmem_alloc(i + 1, KM_SLEEP); 10143 bcopy(&str[desc->dofa_arg], fmt, i + 1); 10144 arg = (uint64_t)(uintptr_t)fmt; 10145 } else { 10146 if (kind == DTRACEACT_PRINTA) { 10147 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 10148 arg = 0; 10149 } else { 10150 arg = desc->dofa_arg; 10151 } 10152 } 10153 10154 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 10155 desc->dofa_uarg, arg); 10156 10157 if (last != NULL) { 10158 last->dtad_next = act; 10159 } else { 10160 first = act; 10161 } 10162 10163 last = act; 10164 10165 if (desc->dofa_difo == DOF_SECIDX_NONE) 10166 continue; 10167 10168 if ((difosec = dtrace_dof_sect(dof, 10169 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 10170 goto err; 10171 10172 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 10173 10174 if (act->dtad_difo == NULL) 10175 goto err; 10176 } 10177 10178 ASSERT(first != NULL); 10179 return (first); 10180 10181 err: 10182 for (act = first; act != NULL; act = next) { 10183 next = act->dtad_next; 10184 dtrace_actdesc_release(act, vstate); 10185 } 10186 10187 return (NULL); 10188 } 10189 10190 static dtrace_ecbdesc_t * 10191 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10192 cred_t *cr) 10193 { 10194 dtrace_ecbdesc_t *ep; 10195 dof_ecbdesc_t *ecb; 10196 dtrace_probedesc_t *desc; 10197 dtrace_predicate_t *pred = NULL; 10198 10199 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 10200 dtrace_dof_error(dof, "truncated ECB description"); 10201 return (NULL); 10202 } 10203 10204 if (sec->dofs_align != sizeof (uint64_t)) { 10205 dtrace_dof_error(dof, "bad alignment in ECB description"); 10206 return (NULL); 10207 } 10208 10209 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 10210 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 10211 10212 if (sec == NULL) 10213 return (NULL); 10214 10215 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10216 ep->dted_uarg = ecb->dofe_uarg; 10217 desc = &ep->dted_probe; 10218 10219 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 10220 goto err; 10221 10222 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 10223 if ((sec = dtrace_dof_sect(dof, 10224 
DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 10225 goto err; 10226 10227 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 10228 goto err; 10229 10230 ep->dted_pred.dtpdd_predicate = pred; 10231 } 10232 10233 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 10234 if ((sec = dtrace_dof_sect(dof, 10235 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 10236 goto err; 10237 10238 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 10239 10240 if (ep->dted_action == NULL) 10241 goto err; 10242 } 10243 10244 return (ep); 10245 10246 err: 10247 if (pred != NULL) 10248 dtrace_predicate_release(pred, vstate); 10249 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10250 return (NULL); 10251 } 10252 10253 /* 10254 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 10255 * specified DOF. At present, this amounts to simply adding 'ubase' to the 10256 * site of any user SETX relocations to account for load object base address. 10257 * In the future, if we need other relocations, this function can be extended. 10258 */ 10259 static int 10260 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 10261 { 10262 uintptr_t daddr = (uintptr_t)dof; 10263 dof_relohdr_t *dofr = 10264 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10265 dof_sec_t *ss, *rs, *ts; 10266 dof_relodesc_t *r; 10267 uint_t i, n; 10268 10269 if (sec->dofs_size < sizeof (dof_relohdr_t) || 10270 sec->dofs_align != sizeof (dof_secidx_t)) { 10271 dtrace_dof_error(dof, "invalid relocation header"); 10272 return (-1); 10273 } 10274 10275 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 10276 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 10277 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 10278 10279 if (ss == NULL || rs == NULL || ts == NULL) 10280 return (-1); /* dtrace_dof_error() has been called already */ 10281 10282 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 10283 rs->dofs_align != sizeof (uint64_t)) { 10284 dtrace_dof_error(dof, "invalid relocation section"); 10285 return (-1); 10286 } 10287 10288 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 10289 n = rs->dofs_size / rs->dofs_entsize; 10290 10291 for (i = 0; i < n; i++) { 10292 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 10293 10294 switch (r->dofr_type) { 10295 case DOF_RELO_NONE: 10296 break; 10297 case DOF_RELO_SETX: 10298 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 10299 sizeof (uint64_t) > ts->dofs_size) { 10300 dtrace_dof_error(dof, "bad relocation offset"); 10301 return (-1); 10302 } 10303 10304 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 10305 dtrace_dof_error(dof, "misaligned setx relo"); 10306 return (-1); 10307 } 10308 10309 *(uint64_t *)taddr += ubase; 10310 break; 10311 default: 10312 dtrace_dof_error(dof, "invalid relocation type"); 10313 return (-1); 10314 } 10315 10316 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 10317 } 10318 10319 return (0); 10320 } 10321 10322 /* 10323 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 10324 * header: it should be at the front of a memory region that is at least 10325 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 10326 * size. It need not be validated in any other way. 
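 *
 * For illustration, a minimal sketch of a caller's obligation before
 * handing DOF to dtrace_dof_slurp() (the names and flow here are
 * hypothetical, not an interface of this file):
 *
 *	dof_hdr_t hdr, *dof;
 *
 *	(void) copyin(uarg, &hdr, sizeof (dof_hdr_t));
 *
 *	if (hdr.dofh_loadsz < sizeof (dof_hdr_t))
 *		return (EINVAL);
 *
 *	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
 *	(void) copyin(uarg, dof, hdr.dofh_loadsz);
 *
 * Everything else -- identification bytes, flags, section headers and
 * section contents -- is validated below.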
10327 */ 10328 static int 10329 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 10330 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 10331 { 10332 uint64_t len = dof->dofh_loadsz, seclen; 10333 uintptr_t daddr = (uintptr_t)dof; 10334 dtrace_ecbdesc_t *ep; 10335 dtrace_enabling_t *enab; 10336 uint_t i; 10337 10338 ASSERT(MUTEX_HELD(&dtrace_lock)); 10339 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 10340 10341 /* 10342 * Check the DOF header identification bytes. In addition to checking 10343 * valid settings, we also verify that unused bits/bytes are zeroed so 10344 * we can use them later without fear of regressing existing binaries. 10345 */ 10346 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 10347 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 10348 dtrace_dof_error(dof, "DOF magic string mismatch"); 10349 return (-1); 10350 } 10351 10352 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 10353 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 10354 dtrace_dof_error(dof, "DOF has invalid data model"); 10355 return (-1); 10356 } 10357 10358 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 10359 dtrace_dof_error(dof, "DOF encoding mismatch"); 10360 return (-1); 10361 } 10362 10363 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 10364 dtrace_dof_error(dof, "DOF version mismatch"); 10365 return (-1); 10366 } 10367 10368 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 10369 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 10370 return (-1); 10371 } 10372 10373 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 10374 dtrace_dof_error(dof, "DOF uses too many integer registers"); 10375 return (-1); 10376 } 10377 10378 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 10379 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 10380 return (-1); 10381 } 10382 10383 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 10384 if (dof->dofh_ident[i] != 0) { 10385 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 10386 return (-1); 10387 } 10388 } 10389 10390 if (dof->dofh_flags & ~DOF_FL_VALID) { 10391 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 10392 return (-1); 10393 } 10394 10395 if (dof->dofh_secsize == 0) { 10396 dtrace_dof_error(dof, "zero section header size"); 10397 return (-1); 10398 } 10399 10400 /* 10401 * Check that the section headers don't exceed the amount of DOF 10402 * data. Note that we cast the section size and number of sections 10403 * to uint64_t's to prevent possible overflow in the multiplication. 10404 */ 10405 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 10406 10407 if (dof->dofh_secoff > len || seclen > len || 10408 dof->dofh_secoff + seclen > len) { 10409 dtrace_dof_error(dof, "truncated section headers"); 10410 return (-1); 10411 } 10412 10413 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 10414 dtrace_dof_error(dof, "misaligned section headers"); 10415 return (-1); 10416 } 10417 10418 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 10419 dtrace_dof_error(dof, "misaligned section size"); 10420 return (-1); 10421 } 10422 10423 /* 10424 * Take an initial pass through the section headers to be sure that 10425 * the headers don't have stray offsets. If the 'noprobes' flag is 10426 * set, do not permit sections relating to providers, probes, or args. 
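 *
 * Note that the bounds checks in this pass compare both the offset and
 * the size against 'len' before comparing their sum: with 64-bit
 * arithmetic the sum alone can wrap. For example (values hypothetical),
 * with len = 0x1000, a section with dofs_offset = 0x1000 and dofs_size
 * = 0xffffffffffffffff has dofs_offset + dofs_size equal to 0xfff,
 * which is less than len; only the individual comparisons reject it.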
10427 */ 10428 for (i = 0; i < dof->dofh_secnum; i++) { 10429 dof_sec_t *sec = (dof_sec_t *)(daddr + 10430 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10431 10432 if (noprobes) { 10433 switch (sec->dofs_type) { 10434 case DOF_SECT_PROVIDER: 10435 case DOF_SECT_PROBES: 10436 case DOF_SECT_PRARGS: 10437 case DOF_SECT_PROFFS: 10438 dtrace_dof_error(dof, "illegal sections " 10439 "for enabling"); 10440 return (-1); 10441 } 10442 } 10443 10444 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10445 continue; /* just ignore non-loadable sections */ 10446 10447 if (sec->dofs_align & (sec->dofs_align - 1)) { 10448 dtrace_dof_error(dof, "bad section alignment"); 10449 return (-1); 10450 } 10451 10452 if (sec->dofs_offset & (sec->dofs_align - 1)) { 10453 dtrace_dof_error(dof, "misaligned section"); 10454 return (-1); 10455 } 10456 10457 if (sec->dofs_offset > len || sec->dofs_size > len || 10458 sec->dofs_offset + sec->dofs_size > len) { 10459 dtrace_dof_error(dof, "corrupt section header"); 10460 return (-1); 10461 } 10462 10463 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 10464 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 10465 dtrace_dof_error(dof, "non-terminating string table"); 10466 return (-1); 10467 } 10468 } 10469 10470 /* 10471 * Take a second pass through the sections and locate and perform any 10472 * relocations that are present. We do this after the first pass to 10473 * be sure that all sections have had their headers validated. 10474 */ 10475 for (i = 0; i < dof->dofh_secnum; i++) { 10476 dof_sec_t *sec = (dof_sec_t *)(daddr + 10477 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10478 10479 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10480 continue; /* skip sections that are not loadable */ 10481 10482 switch (sec->dofs_type) { 10483 case DOF_SECT_URELHDR: 10484 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 10485 return (-1); 10486 break; 10487 } 10488 } 10489 10490 if ((enab = *enabp) == NULL) 10491 enab = *enabp = dtrace_enabling_create(vstate); 10492 10493 for (i = 0; i < dof->dofh_secnum; i++) { 10494 dof_sec_t *sec = (dof_sec_t *)(daddr + 10495 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10496 10497 if (sec->dofs_type != DOF_SECT_ECBDESC) 10498 continue; 10499 10500 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 10501 dtrace_enabling_destroy(enab); 10502 *enabp = NULL; 10503 return (-1); 10504 } 10505 10506 dtrace_enabling_add(enab, ep); 10507 } 10508 10509 return (0); 10510 } 10511 10512 /* 10513 * Process DOF for any options. This routine assumes that the DOF has been 10514 * at least processed by dtrace_dof_slurp(). 
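 *
 * For illustration, an option section is just an array of
 * dof_optdesc_t entries; a consumer asking for a 512-byte string limit
 * would encode an entry like this (values hypothetical):
 *
 *	dof_optdesc_t opt;
 *
 *	opt.dofo_option = DTRACEOPT_STRSIZE;
 *	opt.dofo_strtab = DOF_SECIDX_NONE;
 *	opt.dofo_value = 512;
 *
 * Entries that name a string table or carry an unset value are
 * rejected below.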
10515 */ 10516 static int 10517 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 10518 { 10519 int i, rval; 10520 uint32_t entsize; 10521 size_t offs; 10522 dof_optdesc_t *desc; 10523 10524 for (i = 0; i < dof->dofh_secnum; i++) { 10525 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 10526 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10527 10528 if (sec->dofs_type != DOF_SECT_OPTDESC) 10529 continue; 10530 10531 if (sec->dofs_align != sizeof (uint64_t)) { 10532 dtrace_dof_error(dof, "bad alignment in " 10533 "option description"); 10534 return (EINVAL); 10535 } 10536 10537 if ((entsize = sec->dofs_entsize) == 0) { 10538 dtrace_dof_error(dof, "zeroed option entry size"); 10539 return (EINVAL); 10540 } 10541 10542 if (entsize < sizeof (dof_optdesc_t)) { 10543 dtrace_dof_error(dof, "bad option entry size"); 10544 return (EINVAL); 10545 } 10546 10547 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 10548 desc = (dof_optdesc_t *)((uintptr_t)dof + 10549 (uintptr_t)sec->dofs_offset + offs); 10550 10551 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 10552 dtrace_dof_error(dof, "non-zero option string"); 10553 return (EINVAL); 10554 } 10555 10556 if (desc->dofo_value == DTRACEOPT_UNSET) { 10557 dtrace_dof_error(dof, "unset option"); 10558 return (EINVAL); 10559 } 10560 10561 if ((rval = dtrace_state_option(state, 10562 desc->dofo_option, desc->dofo_value)) != 0) { 10563 dtrace_dof_error(dof, "rejected option"); 10564 return (rval); 10565 } 10566 } 10567 } 10568 10569 return (0); 10570 } 10571 10572 /* 10573 * DTrace Consumer State Functions 10574 */ 10575 int 10576 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 10577 { 10578 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 10579 void *base; 10580 uintptr_t limit; 10581 dtrace_dynvar_t *dvar, *next, *start; 10582 int i; 10583 10584 ASSERT(MUTEX_HELD(&dtrace_lock)); 10585 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 10586 10587 bzero(dstate, sizeof (dtrace_dstate_t)); 10588 10589 if ((dstate->dtds_chunksize = chunksize) == 0) 10590 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 10591 10592 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 10593 size = min; 10594 10595 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10596 return (ENOMEM); 10597 10598 dstate->dtds_size = size; 10599 dstate->dtds_base = base; 10600 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 10601 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 10602 10603 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 10604 10605 if (hashsize != 1 && (hashsize & 1)) 10606 hashsize--; 10607 10608 dstate->dtds_hashsize = hashsize; 10609 dstate->dtds_hash = dstate->dtds_base; 10610 10611 /* 10612 * Determine number of active CPUs. Divide free list evenly among 10613 * active CPUs. 10614 */ 10615 start = (dtrace_dynvar_t *) 10616 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 10617 limit = (uintptr_t)base + size; 10618 10619 maxper = (limit - (uintptr_t)start) / NCPU; 10620 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 10621 10622 for (i = 0; i < NCPU; i++) { 10623 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 10624 10625 /* 10626 * If we don't even have enough chunks to make it once through 10627 * NCPUs, we're just going to allocate everything to the first 10628 * CPU. And if we're on the last CPU, we're going to allocate 10629 * whatever is left over. 
In either case, we set the limit to 10630 * be the limit of the dynamic variable space. 10631 */ 10632 if (maxper == 0 || i == NCPU - 1) { 10633 limit = (uintptr_t)base + size; 10634 start = NULL; 10635 } else { 10636 limit = (uintptr_t)start + maxper; 10637 start = (dtrace_dynvar_t *)limit; 10638 } 10639 10640 ASSERT(limit <= (uintptr_t)base + size); 10641 10642 for (;;) { 10643 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 10644 dstate->dtds_chunksize); 10645 10646 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 10647 break; 10648 10649 dvar->dtdv_next = next; 10650 dvar = next; 10651 } 10652 10653 if (maxper == 0) 10654 break; 10655 } 10656 10657 return (0); 10658 } 10659 10660 void 10661 dtrace_dstate_fini(dtrace_dstate_t *dstate) 10662 { 10663 ASSERT(MUTEX_HELD(&cpu_lock)); 10664 10665 if (dstate->dtds_base == NULL) 10666 return; 10667 10668 kmem_free(dstate->dtds_base, dstate->dtds_size); 10669 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 10670 } 10671 10672 static void 10673 dtrace_vstate_fini(dtrace_vstate_t *vstate) 10674 { 10675 /* 10676 * Logical XOR, where are you? 10677 */ 10678 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 10679 10680 if (vstate->dtvs_nglobals > 0) { 10681 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 10682 sizeof (dtrace_statvar_t *)); 10683 } 10684 10685 if (vstate->dtvs_ntlocals > 0) { 10686 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 10687 sizeof (dtrace_difv_t)); 10688 } 10689 10690 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 10691 10692 if (vstate->dtvs_nlocals > 0) { 10693 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 10694 sizeof (dtrace_statvar_t *)); 10695 } 10696 } 10697 10698 static void 10699 dtrace_state_clean(dtrace_state_t *state) 10700 { 10701 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 10702 return; 10703 10704 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 10705 dtrace_speculation_clean(state); 10706 } 10707 10708 static void 10709 dtrace_state_deadman(dtrace_state_t *state) 10710 { 10711 hrtime_t now; 10712 10713 dtrace_sync(); 10714 10715 now = dtrace_gethrtime(); 10716 10717 if (state != dtrace_anon.dta_state && 10718 now - state->dts_laststatus >= dtrace_deadman_user) 10719 return; 10720 10721 /* 10722 * We must be sure that dts_alive never appears to be less than the 10723 * value upon entry to dtrace_state_deadman(), and because we lack a 10724 * dtrace_cas64(), we cannot store to it atomically. We thus instead 10725 * store INT64_MAX to it, followed by a memory barrier, followed by 10726 * the new value. This assures that dts_alive never appears to be 10727 * less than its true value, regardless of the order in which the 10728 * stores to the underlying storage are issued. 
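 *
 * To make that concrete, the store sequence is:
 *
 *	dts_alive = INT64_MAX;
 *	dtrace_membar_producer();
 *	dts_alive = now;
 *
 * A racing reader sees either the old value, INT64_MAX, the new value,
 * or -- on hardware where a 64-bit store may be split -- a mix of words
 * from INT64_MAX and 'now'; every such mix is greater than or equal to
 * the true value, so the deadman can never falsely conclude that the
 * consumer has been dead longer than it actually has.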
10729 */ 10730 state->dts_alive = INT64_MAX; 10731 dtrace_membar_producer(); 10732 state->dts_alive = now; 10733 } 10734 10735 dtrace_state_t * 10736 dtrace_state_create(dev_t *devp, cred_t *cr) 10737 { 10738 minor_t minor; 10739 major_t major; 10740 char c[30]; 10741 dtrace_state_t *state; 10742 dtrace_optval_t *opt; 10743 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 10744 10745 ASSERT(MUTEX_HELD(&dtrace_lock)); 10746 ASSERT(MUTEX_HELD(&cpu_lock)); 10747 10748 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 10749 VM_BESTFIT | VM_SLEEP); 10750 10751 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 10752 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 10753 return (NULL); 10754 } 10755 10756 state = ddi_get_soft_state(dtrace_softstate, minor); 10757 state->dts_epid = DTRACE_EPIDNONE + 1; 10758 10759 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 10760 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 10761 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 10762 10763 if (devp != NULL) { 10764 major = getemajor(*devp); 10765 } else { 10766 major = ddi_driver_major(dtrace_devi); 10767 } 10768 10769 state->dts_dev = makedevice(major, minor); 10770 10771 if (devp != NULL) 10772 *devp = state->dts_dev; 10773 10774 /* 10775 * We allocate NCPU buffers. On the one hand, this can be quite 10776 * a bit of memory per instance (nearly 36K on a Starcat). On the 10777 * other hand, it saves an additional memory reference in the probe 10778 * path. 10779 */ 10780 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 10781 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 10782 state->dts_cleaner = CYCLIC_NONE; 10783 state->dts_deadman = CYCLIC_NONE; 10784 state->dts_vstate.dtvs_state = state; 10785 10786 for (i = 0; i < DTRACEOPT_MAX; i++) 10787 state->dts_options[i] = DTRACEOPT_UNSET; 10788 10789 /* 10790 * Set the default options. 10791 */ 10792 opt = state->dts_options; 10793 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 10794 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 10795 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 10796 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 10797 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 10798 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 10799 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 10800 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 10801 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 10802 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 10803 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 10804 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 10805 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 10806 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 10807 10808 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 10809 10810 /* 10811 * Set up the credentials for this instantiation. 
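 *
 * Roughly, the privilege-to-credential mapping implemented below is (a
 * summary only, not an interface guarantee):
 *
 *	all privileges			visibility ALL, action ALL
 *	dtrace_proc or dtrace_user	action PROC
 *	dtrace_user + proc_owner	+ visibility ALLPROC, destructive
 *	dtrace_kernel			+ visibility KERNEL, action KERNEL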
10812 */ 10813 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 10814 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 10815 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 10816 } else { 10817 state->dts_cred.dcr_uid = crgetuid(cr); 10818 state->dts_cred.dcr_gid = crgetgid(cr); 10819 10820 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 10821 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 10822 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 10823 } 10824 10825 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) && 10826 PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 10827 state->dts_cred.dcr_visible |= DTRACE_CRV_ALLPROC; 10828 state->dts_cred.dcr_action |= 10829 DTRACE_CRA_PROC_DESTRUCTIVE; 10830 } 10831 10832 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 10833 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 10834 DTRACE_CRV_ALLPROC; 10835 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 10836 DTRACE_CRA_PROC; 10837 10838 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 10839 state->dts_cred.dcr_action |= 10840 DTRACE_CRA_PROC_DESTRUCTIVE; 10841 } 10842 } 10843 10844 return (state); 10845 } 10846 10847 static int 10848 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 10849 { 10850 dtrace_optval_t *opt = state->dts_options, size; 10851 processorid_t cpu; 10852 int flags = 0, rval; 10853 10854 ASSERT(MUTEX_HELD(&dtrace_lock)); 10855 ASSERT(MUTEX_HELD(&cpu_lock)); 10856 ASSERT(which < DTRACEOPT_MAX); 10857 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 10858 (state == dtrace_anon.dta_state && 10859 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 10860 10861 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 10862 return (0); 10863 10864 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 10865 cpu = opt[DTRACEOPT_CPU]; 10866 10867 if (which == DTRACEOPT_SPECSIZE) 10868 flags |= DTRACEBUF_NOSWITCH; 10869 10870 if (which == DTRACEOPT_BUFSIZE) { 10871 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 10872 flags |= DTRACEBUF_RING; 10873 10874 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 10875 flags |= DTRACEBUF_FILL; 10876 10877 flags |= DTRACEBUF_INACTIVE; 10878 } 10879 10880 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 10881 /* 10882 * The size must be 8-byte aligned. If the size is not 8-byte 10883 * aligned, drop it down by the difference. 10884 */ 10885 if (size & (sizeof (uint64_t) - 1)) 10886 size -= size & (sizeof (uint64_t) - 1); 10887 10888 if (size < state->dts_reserve) { 10889 /* 10890 * Buffers always must be large enough to accommodate 10891 * their prereserved space. We return E2BIG instead 10892 * of ENOMEM in this case to allow for user-level 10893 * software to differentiate the cases. 
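 *
 * For example (sizes hypothetical): a 4m request under the default
 * "auto" resize policy that cannot be satisfied is retried at 2m, 1m,
 * 512k and so on down to sizeof (uint64_t) before we give up and
 * return ENOMEM; under the "manual" policy, the first failed
 * allocation returns ENOMEM immediately. A request that falls below
 * the fill-policy reserve is not retried at all -- it fails with E2BIG.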
10894 */ 10895 return (E2BIG); 10896 } 10897 10898 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 10899 10900 if (rval != ENOMEM) { 10901 opt[which] = size; 10902 return (rval); 10903 } 10904 10905 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 10906 return (rval); 10907 } 10908 10909 return (ENOMEM); 10910 } 10911 10912 static int 10913 dtrace_state_buffers(dtrace_state_t *state) 10914 { 10915 dtrace_speculation_t *spec = state->dts_speculations; 10916 int rval, i; 10917 10918 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 10919 DTRACEOPT_BUFSIZE)) != 0) 10920 return (rval); 10921 10922 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 10923 DTRACEOPT_AGGSIZE)) != 0) 10924 return (rval); 10925 10926 for (i = 0; i < state->dts_nspeculations; i++) { 10927 if ((rval = dtrace_state_buffer(state, 10928 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 10929 return (rval); 10930 } 10931 10932 return (0); 10933 } 10934 10935 static void 10936 dtrace_state_prereserve(dtrace_state_t *state) 10937 { 10938 dtrace_ecb_t *ecb; 10939 dtrace_probe_t *probe; 10940 10941 state->dts_reserve = 0; 10942 10943 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 10944 return; 10945 10946 /* 10947 * If our buffer policy is a "fill" buffer policy, we need to set the 10948 * prereserved space to be the space required by the END probes. 10949 */ 10950 probe = dtrace_probes[dtrace_probeid_end - 1]; 10951 ASSERT(probe != NULL); 10952 10953 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 10954 if (ecb->dte_state != state) 10955 continue; 10956 10957 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 10958 } 10959 } 10960 10961 static int 10962 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 10963 { 10964 dtrace_optval_t *opt = state->dts_options, sz, nspec; 10965 dtrace_speculation_t *spec; 10966 dtrace_buffer_t *buf; 10967 cyc_handler_t hdlr; 10968 cyc_time_t when; 10969 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 10970 dtrace_icookie_t cookie; 10971 10972 mutex_enter(&cpu_lock); 10973 mutex_enter(&dtrace_lock); 10974 10975 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 10976 rval = EBUSY; 10977 goto out; 10978 } 10979 10980 /* 10981 * Before we can perform any checks, we must prime all of the 10982 * retained enablings that correspond to this state. 10983 */ 10984 dtrace_enabling_prime(state); 10985 10986 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 10987 rval = EACCES; 10988 goto out; 10989 } 10990 10991 dtrace_state_prereserve(state); 10992 10993 /* 10994 * Now we want to try to allocate our speculations. 10995 * We do not automatically resize the number of speculations; if 10996 * this fails, we will fail the operation.
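 *
 * Note that each speculation carries its own array of NCPU
 * dtrace_buffer_t structures, so the total cost of this allocation is
 * nspec * NCPU * sizeof (dtrace_buffer_t); all of it is KM_NOSLEEP,
 * which is why the failure mode here is ENOMEM rather than blocking.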
10997 */ 10998 nspec = opt[DTRACEOPT_NSPEC]; 10999 ASSERT(nspec != DTRACEOPT_UNSET); 11000 11001 if (nspec > INT_MAX) { 11002 rval = ENOMEM; 11003 goto out; 11004 } 11005 11006 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 11007 11008 if (spec == NULL) { 11009 rval = ENOMEM; 11010 goto out; 11011 } 11012 11013 state->dts_speculations = spec; 11014 state->dts_nspeculations = (int)nspec; 11015 11016 for (i = 0; i < nspec; i++) { 11017 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 11018 rval = ENOMEM; 11019 goto err; 11020 } 11021 11022 spec[i].dtsp_buffer = buf; 11023 } 11024 11025 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 11026 if (dtrace_anon.dta_state == NULL) { 11027 rval = ENOENT; 11028 goto out; 11029 } 11030 11031 if (state->dts_necbs != 0) { 11032 rval = EALREADY; 11033 goto out; 11034 } 11035 11036 state->dts_anon = dtrace_anon_grab(); 11037 ASSERT(state->dts_anon != NULL); 11038 11039 *cpu = dtrace_anon.dta_beganon; 11040 11041 /* 11042 * If the anonymous state is active (as it almost certainly 11043 * is if the anonymous enabling ultimately matched anything), 11044 * we don't allow any further option processing -- but we 11045 * don't return failure. 11046 */ 11047 state = state->dts_anon; 11048 11049 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11050 goto out; 11051 } 11052 11053 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 11054 opt[DTRACEOPT_AGGSIZE] != 0) { 11055 if (state->dts_aggregations == NULL) { 11056 /* 11057 * We're not going to create an aggregation buffer 11058 * because we don't have any ECBs that contain 11059 * aggregations -- set this option to 0. 11060 */ 11061 opt[DTRACEOPT_AGGSIZE] = 0; 11062 } else { 11063 /* 11064 * If we have an aggregation buffer, we must also have 11065 * a buffer to use as scratch. 11066 */ 11067 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 11068 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 11069 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 11070 } 11071 } 11072 } 11073 11074 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 11075 opt[DTRACEOPT_SPECSIZE] != 0) { 11076 if (!state->dts_speculates) { 11077 /* 11078 * We're not going to create speculation buffers 11079 * because we don't have any ECBs that actually 11080 * speculate -- set the speculation size to 0. 11081 */ 11082 opt[DTRACEOPT_SPECSIZE] = 0; 11083 } 11084 } 11085 11086 /* 11087 * The bare minimum size for any buffer that we're actually going to 11088 * do anything to is sizeof (uint64_t). 11089 */ 11090 sz = sizeof (uint64_t); 11091 11092 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 11093 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 11094 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 11095 /* 11096 * A buffer size has been explicitly set to 0 (or to a size 11097 * that will be adjusted to 0) and we need the space -- we 11098 * need to return failure. We return ENOSPC to differentiate 11099 * it from failing to allocate a buffer due to failure to meet 11100 * the reserve (for which we return E2BIG). 
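 *
 * Summarizing the error taxonomy for buffer sizing as implemented here
 * and in dtrace_state_buffer():
 *
 *	E2BIG	the size cannot accommodate the fill-policy reserve
 *	ENOMEM	allocation failed, even after any automatic resizing
 *	ENOSPC	a buffer that is needed has been explicitly sized to 0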
11101 */ 11102 rval = ENOSPC; 11103 goto out; 11104 } 11105 11106 if ((rval = dtrace_state_buffers(state)) != 0) 11107 goto err; 11108 11109 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 11110 sz = dtrace_dstate_defsize; 11111 11112 do { 11113 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 11114 11115 if (rval == 0) 11116 break; 11117 11118 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11119 goto err; 11120 } while (sz >>= 1); 11121 11122 opt[DTRACEOPT_DYNVARSIZE] = sz; 11123 11124 if (rval != 0) 11125 goto err; 11126 11127 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 11128 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 11129 11130 if (opt[DTRACEOPT_CLEANRATE] == 0) 11131 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11132 11133 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 11134 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 11135 11136 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 11137 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11138 11139 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 11140 hdlr.cyh_arg = state; 11141 hdlr.cyh_level = CY_LOW_LEVEL; 11142 11143 when.cyt_when = 0; 11144 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 11145 11146 state->dts_cleaner = cyclic_add(&hdlr, &when); 11147 11148 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 11149 hdlr.cyh_arg = state; 11150 hdlr.cyh_level = CY_LOW_LEVEL; 11151 11152 when.cyt_when = 0; 11153 when.cyt_interval = dtrace_deadman_interval; 11154 11155 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 11156 state->dts_deadman = cyclic_add(&hdlr, &when); 11157 11158 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 11159 11160 /* 11161 * Now it's time to actually fire the BEGIN probe. We need to disable 11162 * interrupts here both to record the CPU on which we fired the BEGIN 11163 * probe (the data from this CPU will be processed first at user 11164 * level) and to manually activate the buffer for this CPU. 11165 */ 11166 cookie = dtrace_interrupt_disable(); 11167 *cpu = CPU->cpu_id; 11168 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 11169 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11170 11171 dtrace_probe(dtrace_probeid_begin, 11172 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11173 dtrace_interrupt_enable(cookie); 11174 /* 11175 * We may have had an exit action from a BEGIN probe; only change our 11176 * state to ACTIVE if we're still in WARMUP. 11177 */ 11178 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 11179 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 11180 11181 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 11182 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 11183 11184 /* 11185 * Regardless of whether we're now in ACTIVE or DRAINING, we 11186 * want each CPU to transition its principal buffer out of the 11187 * INACTIVE state. Doing this assures that no CPU will suddenly begin 11188 * processing an ECB halfway down a probe's ECB chain; all CPUs will 11189 * atomically transition from processing none of a state's ECBs to 11190 * processing all of them.
11191 */ 11192 dtrace_xcall(DTRACE_CPUALL, 11193 (dtrace_xcall_t)dtrace_buffer_activate, state); 11194 goto out; 11195 11196 err: 11197 dtrace_buffer_free(state->dts_buffer); 11198 dtrace_buffer_free(state->dts_aggbuffer); 11199 11200 if ((nspec = state->dts_nspeculations) == 0) { 11201 ASSERT(state->dts_speculations == NULL); 11202 goto out; 11203 } 11204 11205 spec = state->dts_speculations; 11206 ASSERT(spec != NULL); 11207 11208 for (i = 0; i < state->dts_nspeculations; i++) { 11209 if ((buf = spec[i].dtsp_buffer) == NULL) 11210 break; 11211 11212 dtrace_buffer_free(buf); 11213 kmem_free(buf, bufsize); 11214 } 11215 11216 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11217 state->dts_nspeculations = 0; 11218 state->dts_speculations = NULL; 11219 11220 out: 11221 mutex_exit(&dtrace_lock); 11222 mutex_exit(&cpu_lock); 11223 11224 return (rval); 11225 } 11226 11227 static int 11228 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 11229 { 11230 dtrace_icookie_t cookie; 11231 11232 ASSERT(MUTEX_HELD(&dtrace_lock)); 11233 11234 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 11235 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 11236 return (EINVAL); 11237 11238 /* 11239 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 11240 * to be sure that every CPU has seen it. See below for the details 11241 * on why this is done. 11242 */ 11243 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 11244 dtrace_sync(); 11245 11246 /* 11247 * By this point, it is impossible for any CPU to be still processing 11248 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 11249 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 11250 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 11251 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 11252 * iff we're in the END probe. 11253 */ 11254 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 11255 dtrace_sync(); 11256 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 11257 11258 /* 11259 * Finally, we can release the reserve and call the END probe. We 11260 * disable interrupts across calling the END probe to allow us to 11261 * return the CPU on which we actually called the END probe. This 11262 * allows user-land to be sure that this CPU's principal buffer is 11263 * processed last. 
11264 */ 11265 state->dts_reserve = 0; 11266 11267 cookie = dtrace_interrupt_disable(); 11268 *cpu = CPU->cpu_id; 11269 dtrace_probe(dtrace_probeid_end, 11270 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11271 dtrace_interrupt_enable(cookie); 11272 11273 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 11274 dtrace_sync(); 11275 11276 return (0); 11277 } 11278 11279 static int 11280 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 11281 dtrace_optval_t val) 11282 { 11283 ASSERT(MUTEX_HELD(&dtrace_lock)); 11284 11285 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11286 return (EBUSY); 11287 11288 if (option >= DTRACEOPT_MAX) 11289 return (EINVAL); 11290 11291 if (option != DTRACEOPT_CPU && val < 0) 11292 return (EINVAL); 11293 11294 switch (option) { 11295 case DTRACEOPT_DESTRUCTIVE: 11296 if (dtrace_destructive_disallow) 11297 return (EACCES); 11298 11299 state->dts_cred.dcr_destructive = 1; 11300 break; 11301 11302 case DTRACEOPT_BUFSIZE: 11303 case DTRACEOPT_DYNVARSIZE: 11304 case DTRACEOPT_AGGSIZE: 11305 case DTRACEOPT_SPECSIZE: 11306 case DTRACEOPT_STRSIZE: 11307 if (val < 0) 11308 return (EINVAL); 11309 11310 if (val >= LONG_MAX) { 11311 /* 11312 * If this is an otherwise negative value, set it to 11313 * the highest multiple of 128m less than LONG_MAX. 11314 * Technically, we're adjusting the size without 11315 * regard to the buffer resizing policy, but in fact, 11316 * this has no effect -- if we set the buffer size to 11317 * ~LONG_MAX and the buffer policy is ultimately set to 11318 * be "manual", the buffer allocation is guaranteed to 11319 * fail, if only because the allocation requires two 11320 * buffers. (We set the size to the highest 11321 * multiple of 128m because it ensures that the size 11322 * will remain a multiple of a megabyte when 11323 * repeatedly halved -- all the way down to 15m.) 11324 */ 11325 val = LONG_MAX - (1 << 27) + 1; 11326 } 11327 } 11328 11329 state->dts_options[option] = val; 11330 11331 return (0); 11332 } 11333 11334 static void 11335 dtrace_state_destroy(dtrace_state_t *state) 11336 { 11337 dtrace_ecb_t *ecb; 11338 dtrace_vstate_t *vstate = &state->dts_vstate; 11339 minor_t minor = getminor(state->dts_dev); 11340 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11341 dtrace_speculation_t *spec = state->dts_speculations; 11342 int nspec = state->dts_nspeculations; 11343 uint32_t match; 11344 11345 ASSERT(MUTEX_HELD(&dtrace_lock)); 11346 ASSERT(MUTEX_HELD(&cpu_lock)); 11347 11348 /* 11349 * First, retract any retained enablings for this state. 11350 */ 11351 dtrace_enabling_retract(state); 11352 ASSERT(state->dts_nretained == 0); 11353 11354 /* 11355 * Now we need to disable and destroy any enabled probes. Because any 11356 * DTRACE_PRIV_KERNEL probes may actually be slowing our progress 11357 * (especially if they're all enabled), we take two passes through 11358 * the ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, 11359 * and in the second we disable whatever is left over.
11360 */ 11361 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 11362 for (i = 0; i < state->dts_necbs; i++) { 11363 if ((ecb = state->dts_ecbs[i]) == NULL) 11364 continue; 11365 11366 if (match && ecb->dte_probe != NULL) { 11367 dtrace_probe_t *probe = ecb->dte_probe; 11368 dtrace_provider_t *prov = probe->dtpr_provider; 11369 11370 if (!(prov->dtpv_priv.dtpp_flags & match)) 11371 continue; 11372 } 11373 11374 dtrace_ecb_disable(ecb); 11375 dtrace_ecb_destroy(ecb); 11376 } 11377 11378 if (!match) 11379 break; 11380 } 11381 11382 /* 11383 * Before we free the buffers, perform one more sync to assure that 11384 * every CPU is out of probe context. 11385 */ 11386 dtrace_sync(); 11387 11388 dtrace_buffer_free(state->dts_buffer); 11389 dtrace_buffer_free(state->dts_aggbuffer); 11390 11391 for (i = 0; i < nspec; i++) 11392 dtrace_buffer_free(spec[i].dtsp_buffer); 11393 11394 if (state->dts_cleaner != CYCLIC_NONE) 11395 cyclic_remove(state->dts_cleaner); 11396 11397 if (state->dts_deadman != CYCLIC_NONE) 11398 cyclic_remove(state->dts_deadman); 11399 11400 dtrace_dstate_fini(&vstate->dtvs_dynvars); 11401 dtrace_vstate_fini(vstate); 11402 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 11403 11404 if (state->dts_aggregations != NULL) { 11405 #ifdef DEBUG 11406 for (i = 0; i < state->dts_naggregations; i++) 11407 ASSERT(state->dts_aggregations[i] == NULL); 11408 #endif 11409 ASSERT(state->dts_naggregations > 0); 11410 kmem_free(state->dts_aggregations, 11411 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 11412 } 11413 11414 kmem_free(state->dts_buffer, bufsize); 11415 kmem_free(state->dts_aggbuffer, bufsize); 11416 11417 for (i = 0; i < nspec; i++) 11418 kmem_free(spec[i].dtsp_buffer, bufsize); 11419 11420 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11421 11422 dtrace_format_destroy(state); 11423 11424 vmem_destroy(state->dts_aggid_arena); 11425 ddi_soft_state_free(dtrace_softstate, minor); 11426 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11427 } 11428 11429 /* 11430 * DTrace Anonymous Enabling Functions 11431 */ 11432 static dtrace_state_t * 11433 dtrace_anon_grab(void) 11434 { 11435 dtrace_state_t *state; 11436 11437 ASSERT(MUTEX_HELD(&dtrace_lock)); 11438 11439 if ((state = dtrace_anon.dta_state) == NULL) { 11440 ASSERT(dtrace_anon.dta_enabling == NULL); 11441 return (NULL); 11442 } 11443 11444 ASSERT(dtrace_anon.dta_enabling != NULL); 11445 ASSERT(dtrace_retained != NULL); 11446 11447 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 11448 dtrace_anon.dta_enabling = NULL; 11449 dtrace_anon.dta_state = NULL; 11450 11451 return (state); 11452 } 11453 11454 static void 11455 dtrace_anon_property(void) 11456 { 11457 int i, rv; 11458 dtrace_state_t *state; 11459 dof_hdr_t *dof; 11460 char c[32]; /* enough for "dof-data-" + digits */ 11461 11462 ASSERT(MUTEX_HELD(&dtrace_lock)); 11463 ASSERT(MUTEX_HELD(&cpu_lock)); 11464 11465 for (i = 0; ; i++) { 11466 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 11467 11468 dtrace_err_verbose = 1; 11469 11470 if ((dof = dtrace_dof_property(c)) == NULL) { 11471 dtrace_err_verbose = 0; 11472 break; 11473 } 11474 11475 /* 11476 * We want to create anonymous state, so we need to transition 11477 * the kernel debugger to indicate that DTrace is active. If 11478 * this fails (e.g. because the debugger has modified text in 11479 * some way), we won't continue with the processing. 
11480 */ 11481 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 11482 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 11483 "enabling ignored."); 11484 dtrace_dof_destroy(dof); 11485 break; 11486 } 11487 11488 /* 11489 * If we haven't allocated an anonymous state, we'll do so now. 11490 */ 11491 if ((state = dtrace_anon.dta_state) == NULL) { 11492 state = dtrace_state_create(NULL, NULL); 11493 dtrace_anon.dta_state = state; 11494 11495 if (state == NULL) { 11496 /* 11497 * This basically shouldn't happen: the only 11498 * failure mode from dtrace_state_create() is a 11499 * failure of ddi_soft_state_zalloc() that 11500 * itself should never happen. Still, the 11501 * interface allows for a failure mode, and 11502 * we want to fail as gracefully as possible: 11503 * we'll emit an error message and cease 11504 * processing anonymous state in this case. 11505 */ 11506 cmn_err(CE_WARN, "failed to create " 11507 "anonymous state"); 11508 dtrace_dof_destroy(dof); 11509 break; 11510 } 11511 } 11512 11513 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 11514 &dtrace_anon.dta_enabling, 0, B_TRUE); 11515 11516 if (rv == 0) 11517 rv = dtrace_dof_options(dof, state); 11518 11519 dtrace_err_verbose = 0; 11520 dtrace_dof_destroy(dof); 11521 11522 if (rv != 0) { 11523 /* 11524 * This is malformed DOF; chuck any anonymous state 11525 * that we created. 11526 */ 11527 ASSERT(dtrace_anon.dta_enabling == NULL); 11528 dtrace_state_destroy(state); 11529 dtrace_anon.dta_state = NULL; 11530 break; 11531 } 11532 11533 ASSERT(dtrace_anon.dta_enabling != NULL); 11534 } 11535 11536 if (dtrace_anon.dta_enabling != NULL) { 11537 int rval; 11538 11539 /* 11540 * dtrace_enabling_retain() can only fail because we are 11541 * trying to retain more enablings than are allowed -- but 11542 * we only have one anonymous enabling, and we are guaranteed 11543 * to be allowed at least one retained enabling; we assert 11544 * that dtrace_enabling_retain() returns success. 11545 */ 11546 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 11547 ASSERT(rval == 0); 11548 11549 dtrace_enabling_dump(dtrace_anon.dta_enabling); 11550 } 11551 } 11552 11553 /* 11554 * DTrace Helper Functions 11555 */ 11556 static void 11557 dtrace_helper_trace(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate, 11558 int where) 11559 { 11560 uint32_t size, next, nnext, i; 11561 dtrace_helptrace_t *ent; 11562 11563 if (!dtrace_helptrace_enabled) 11564 return; 11565 11566 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 11567 11568 /* 11569 * What would a tracing framework be without its own tracing 11570 * framework? (Well, a hell of a lot simpler, for starters...) 11571 */ 11572 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 11573 sizeof (uint64_t) - sizeof (uint64_t); 11574 11575 /* 11576 * Iterate until we can allocate a slot in the trace buffer. 11577 */ 11578 do { 11579 next = dtrace_helptrace_next; 11580 11581 if (next + size < dtrace_helptrace_bufsize) { 11582 nnext = next + size; 11583 } else { 11584 nnext = size; 11585 } 11586 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 11587 11588 /* 11589 * We have our slot; fill it in. 
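 *
 * To illustrate the wrap behavior of the loop above with hypothetical
 * numbers: given a 64-byte buffer and a 24-byte entry, successive
 * allocations set dtrace_helptrace_next to 24, 48 and then -- because
 * 48 + 24 does not fit -- back to 24, with that third entry written at
 * offset 0 (the nnext == size case handled below).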
11590 */ 11591 if (nnext == size) 11592 next = 0; 11593 11594 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 11595 ent->dtht_helper = helper; 11596 ent->dtht_where = where; 11597 ent->dtht_nlocals = vstate->dtvs_nlocals; 11598 11599 for (i = 0; i < vstate->dtvs_nlocals; i++) { 11600 dtrace_statvar_t *svar; 11601 11602 if ((svar = vstate->dtvs_locals[i]) == NULL) 11603 continue; 11604 11605 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 11606 ent->dtht_locals[i] = 11607 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 11608 } 11609 } 11610 11611 static uint64_t 11612 dtrace_helper(int which, dtrace_mstate_t *mstate, 11613 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 11614 { 11615 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 11616 uint64_t sarg0 = mstate->dtms_arg[0]; 11617 uint64_t sarg1 = mstate->dtms_arg[1]; 11618 uint64_t rval; 11619 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 11620 dtrace_helper_action_t *helper; 11621 dtrace_vstate_t *vstate; 11622 dtrace_difo_t *pred; 11623 int i, trace = dtrace_helptrace_enabled; 11624 11625 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 11626 11627 if (helpers == NULL) 11628 return (0); 11629 11630 if ((helper = helpers->dthps_actions[which]) == NULL) 11631 return (0); 11632 11633 vstate = &helpers->dthps_vstate; 11634 mstate->dtms_arg[0] = arg0; 11635 mstate->dtms_arg[1] = arg1; 11636 11637 /* 11638 * Now iterate over each helper. If its predicate evaluates to 'true', 11639 * we'll call the corresponding actions. Note that the below calls 11640 * to dtrace_dif_emulate() may set faults in machine state. This is 11641 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 11642 * the stored DIF offset with its own (which is the desired behavior). 11643 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 11644 * from machine state; this is okay, too. 11645 */ 11646 for (; helper != NULL; helper = helper->dthp_next) { 11647 if ((pred = helper->dthp_predicate) != NULL) { 11648 if (trace) 11649 dtrace_helper_trace(helper, vstate, 0); 11650 11651 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 11652 goto next; 11653 11654 if (*flags & CPU_DTRACE_FAULT) 11655 goto err; 11656 } 11657 11658 for (i = 0; i < helper->dthp_nactions; i++) { 11659 if (trace) 11660 dtrace_helper_trace(helper, vstate, i + 1); 11661 11662 rval = dtrace_dif_emulate(helper->dthp_actions[i], 11663 mstate, vstate, state); 11664 11665 if (*flags & CPU_DTRACE_FAULT) 11666 goto err; 11667 } 11668 11669 next: 11670 if (trace) 11671 dtrace_helper_trace(helper, vstate, 11672 DTRACE_HELPTRACE_NEXT); 11673 } 11674 11675 if (trace) 11676 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_DONE); 11677 11678 /* 11679 * Restore the arg0 that we saved upon entry. 11680 */ 11681 mstate->dtms_arg[0] = sarg0; 11682 mstate->dtms_arg[1] = sarg1; 11683 11684 return (rval); 11685 11686 err: 11687 if (trace) 11688 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_ERR); 11689 11690 /* 11691 * Restore the arg0 that we saved upon entry. 
11692 */ 11693 mstate->dtms_arg[0] = sarg0; 11694 mstate->dtms_arg[1] = sarg1; 11695 11696 return (0); 11697 } 11698 11699 static void 11700 dtrace_helper_destroy(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate) 11701 { 11702 int i; 11703 11704 if (helper->dthp_predicate != NULL) 11705 dtrace_difo_release(helper->dthp_predicate, vstate); 11706 11707 for (i = 0; i < helper->dthp_nactions; i++) { 11708 ASSERT(helper->dthp_actions[i] != NULL); 11709 dtrace_difo_release(helper->dthp_actions[i], vstate); 11710 } 11711 11712 kmem_free(helper->dthp_actions, 11713 helper->dthp_nactions * sizeof (dtrace_difo_t *)); 11714 kmem_free(helper, sizeof (dtrace_helper_action_t)); 11715 } 11716 11717 static int 11718 dtrace_helper_destroygen(int gen) 11719 { 11720 dtrace_helpers_t *help = curproc->p_dtrace_helpers; 11721 dtrace_vstate_t *vstate; 11722 int i; 11723 11724 ASSERT(MUTEX_HELD(&dtrace_lock)); 11725 11726 if (help == NULL || gen > help->dthps_generation) 11727 return (EINVAL); 11728 11729 vstate = &help->dthps_vstate; 11730 11731 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 11732 dtrace_helper_action_t *last = NULL, *h, *next; 11733 11734 for (h = help->dthps_actions[i]; h != NULL; h = next) { 11735 next = h->dthp_next; 11736 11737 if (h->dthp_generation == gen) { 11738 if (last != NULL) { 11739 last->dthp_next = next; 11740 } else { 11741 help->dthps_actions[i] = next; 11742 } 11743 11744 dtrace_helper_destroy(h, vstate); 11745 } else { 11746 last = h; 11747 } 11748 } 11749 } 11750 11751 return (0); 11752 } 11753 11754 static int 11755 dtrace_helper_validate(dtrace_helper_action_t *helper) 11756 { 11757 int err = 0, i; 11758 dtrace_difo_t *dp; 11759 11760 if ((dp = helper->dthp_predicate) != NULL) 11761 err += dtrace_difo_validate_helper(dp); 11762 11763 for (i = 0; i < helper->dthp_nactions; i++) 11764 err += dtrace_difo_validate_helper(helper->dthp_actions[i]); 11765 11766 return (err == 0); 11767 } 11768 11769 static int 11770 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 11771 { 11772 dtrace_helpers_t *help; 11773 dtrace_helper_action_t *helper, *last; 11774 dtrace_actdesc_t *act; 11775 dtrace_vstate_t *vstate; 11776 dtrace_predicate_t *pred; 11777 int count = 0, nactions = 0, i; 11778 11779 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 11780 return (EINVAL); 11781 11782 help = curproc->p_dtrace_helpers; 11783 last = help->dthps_actions[which]; 11784 vstate = &help->dthps_vstate; 11785 11786 for (count = 0; last != NULL; last = last->dthp_next) { 11787 count++; 11788 if (last->dthp_next == NULL) 11789 break; 11790 } 11791 11792 /* 11793 * If we already have dtrace_helper_actions_max helper actions for this 11794 * helper action type, we'll refuse to add a new one.
11795 */ 11796 if (count >= dtrace_helper_actions_max) 11797 return (ENOSPC); 11798 11799 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 11800 helper->dthp_generation = help->dthps_generation; 11801 11802 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 11803 ASSERT(pred->dtp_difo != NULL); 11804 dtrace_difo_hold(pred->dtp_difo); 11805 helper->dthp_predicate = pred->dtp_difo; 11806 } 11807 11808 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 11809 if (act->dtad_kind != DTRACEACT_DIFEXPR) 11810 goto err; 11811 11812 if (act->dtad_difo == NULL) 11813 goto err; 11814 11815 nactions++; 11816 } 11817 11818 helper->dthp_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 11819 (helper->dthp_nactions = nactions), KM_SLEEP); 11820 11821 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 11822 dtrace_difo_hold(act->dtad_difo); 11823 helper->dthp_actions[i++] = act->dtad_difo; 11824 } 11825 11826 if (!dtrace_helper_validate(helper)) 11827 goto err; 11828 11829 if (last == NULL) { 11830 help->dthps_actions[which] = helper; 11831 } else { 11832 last->dthp_next = helper; 11833 } 11834 11835 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 11836 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 11837 dtrace_helptrace_next = 0; 11838 } 11839 11840 return (0); 11841 err: 11842 dtrace_helper_destroy(helper, vstate); 11843 return (EINVAL); 11844 } 11845 11846 static void 11847 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 11848 dof_helper_t *dofhp) 11849 { 11850 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 11851 11852 mutex_enter(&dtrace_meta_lock); 11853 mutex_enter(&dtrace_lock); 11854 11855 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 11856 /* 11857 * If the dtrace module is loaded but not attached, or if 11858 * there isn't a meta provider registered to deal with 11859 * these provider descriptions, we need to postpone creating 11860 * the actual providers until later. 11861 */ 11862 11863 if (help->dthps_next == NULL && help->dthps_prev == NULL && 11864 dtrace_deferred_pid != help) { 11865 help->dthps_pid = p->p_pid; 11866 help->dthps_next = dtrace_deferred_pid; 11867 help->dthps_prev = NULL; 11868 if (dtrace_deferred_pid != NULL) 11869 dtrace_deferred_pid->dthps_prev = help; 11870 dtrace_deferred_pid = help; 11871 } 11872 11873 mutex_exit(&dtrace_lock); 11874 11875 } else if (dofhp != NULL) { 11876 /* 11877 * If the dtrace module is loaded and we have a particular 11878 * helper provider description, pass that off to the 11879 * meta provider. 11880 */ 11881 11882 mutex_exit(&dtrace_lock); 11883 11884 dtrace_helper_provide(dofhp, p->p_pid); 11885 11886 } else { 11887 /* 11888 * Otherwise, just pass all the helper provider descriptions 11889 * off to the meta provider. 11890 */ 11891 11892 int i; 11893 mutex_exit(&dtrace_lock); 11894 11895 for (i = 0; i < help->dthps_nprovs; i++) { 11896 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 11897 p->p_pid); 11898 } 11899 } 11900 11901 mutex_exit(&dtrace_meta_lock); 11902 } 11903 11904 static int 11905 dtrace_helper_provider_add(dof_helper_t *dofhp) 11906 { 11907 dtrace_helpers_t *help; 11908 dtrace_helper_provider_t *hprov, **tmp_provs; 11909 uint_t tmp_nprovs, i; 11910 11911 help = curproc->p_dtrace_helpers; 11912 ASSERT(help != NULL); 11913 11914 /* 11915 * If we already have dtrace_helper_providers_max helper providers, 11916 * we'll refuse to add a new one.
11917 */ 11918 if (help->dthps_nprovs >= dtrace_helper_providers_max) 11919 return (ENOSPC); 11920 11921 /* 11922 * Check to make sure this isn't a duplicate. 11923 */ 11924 for (i = 0; i < help->dthps_nprovs; i++) { 11925 if (dofhp->dofhp_addr == 11926 help->dthps_provs[i]->dthp_prov.dofhp_addr) 11927 return (EALREADY); 11928 } 11929 11930 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 11931 hprov->dthp_prov = *dofhp; 11932 hprov->dthp_ref = 1; 11933 11934 tmp_nprovs = help->dthps_nprovs; 11935 tmp_provs = help->dthps_provs; 11936 help->dthps_nprovs++; 11937 help->dthps_provs = kmem_zalloc(help->dthps_nprovs * 11938 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 11939 11940 help->dthps_provs[tmp_nprovs] = hprov; 11941 if (tmp_provs != NULL) { 11942 bcopy(tmp_provs, help->dthps_provs, tmp_nprovs * 11943 sizeof (dtrace_helper_provider_t *)); 11944 kmem_free(tmp_provs, tmp_nprovs * 11945 sizeof (dtrace_helper_provider_t *)); 11946 } 11947 11948 return (0); 11949 } 11950 11951 static void 11952 dtrace_helper_provider_remove(dtrace_helper_provider_t *hprov) 11953 { 11954 mutex_enter(&dtrace_lock); 11955 11956 if (--hprov->dthp_ref == 0) { 11957 dof_hdr_t *dof; 11958 mutex_exit(&dtrace_lock); 11959 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 11960 dtrace_dof_destroy(dof); 11961 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 11962 } else { 11963 mutex_exit(&dtrace_lock); 11964 } 11965 } 11966 11967 static int 11968 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 11969 { 11970 uintptr_t daddr = (uintptr_t)dof; 11971 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 11972 dof_provider_t *provider; 11973 dof_probe_t *probe; 11974 uint8_t *arg; 11975 char *strtab, *typestr; 11976 dof_stridx_t typeidx; 11977 size_t typesz; 11978 uint_t nprobes, j, k; 11979 11980 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 11981 11982 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 11983 dtrace_dof_error(dof, "misaligned section offset"); 11984 return (-1); 11985 } 11986 11987 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 11988 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 11989 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 11990 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 11991 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 11992 11993 if (str_sec == NULL || prb_sec == NULL || 11994 arg_sec == NULL || off_sec == NULL) 11995 return (-1); 11996 11997 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 11998 11999 if (provider->dofpv_name >= str_sec->dofs_size || 12000 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 12001 dtrace_dof_error(dof, "invalid provider name"); 12002 return (-1); 12003 } 12004 12005 if (prb_sec->dofs_entsize == 0 || 12006 prb_sec->dofs_entsize > prb_sec->dofs_size) { 12007 dtrace_dof_error(dof, "invalid entry size"); 12008 return (-1); 12009 } 12010 12011 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 12012 dtrace_dof_error(dof, "misaligned entry size"); 12013 return (-1); 12014 } 12015 12016 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 12017 dtrace_dof_error(dof, "invalid entry size"); 12018 return (-1); 12019 } 12020 12021 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 12022 dtrace_dof_error(dof, "misaligned section offset"); 12023 return (-1); 12024 } 12025 12026 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 12027 dtrace_dof_error(dof, "invalid entry size"); 
12028 return (-1); 12029 } 12030 12031 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 12032 12033 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 12034 12035 /* 12036 * Take a pass through the probes to check for errors. 12037 */ 12038 for (j = 0; j < nprobes; j++) { 12039 probe = (dof_probe_t *)(uintptr_t)(daddr + 12040 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 12041 12042 if (probe->dofpr_func >= str_sec->dofs_size) { 12043 dtrace_dof_error(dof, "invalid function name"); 12044 return (-1); 12045 } 12046 12047 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 12048 dtrace_dof_error(dof, "function name too long"); 12049 return (-1); 12050 } 12051 12052 if (probe->dofpr_name >= str_sec->dofs_size || 12053 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 12054 dtrace_dof_error(dof, "invalid probe name"); 12055 return (-1); 12056 } 12057 12058 12059 if (probe->dofpr_offidx + probe->dofpr_noffs < 12060 probe->dofpr_offidx || 12061 (probe->dofpr_offidx + probe->dofpr_noffs) * 12062 off_sec->dofs_entsize > off_sec->dofs_size) { 12063 dtrace_dof_error(dof, "invalid probe offset"); 12064 return (-1); 12065 } 12066 12067 if (probe->dofpr_argidx + probe->dofpr_xargc < 12068 probe->dofpr_argidx || 12069 (probe->dofpr_argidx + probe->dofpr_xargc) * 12070 arg_sec->dofs_entsize > arg_sec->dofs_size) { 12071 dtrace_dof_error(dof, "invalid args"); 12072 return (-1); 12073 } 12074 12075 typeidx = probe->dofpr_nargv; 12076 typestr = strtab + probe->dofpr_nargv; 12077 for (k = 0; k < probe->dofpr_nargc; k++) { 12078 if (typeidx >= str_sec->dofs_size) { 12079 dtrace_dof_error(dof, "bad " 12080 "native argument type"); 12081 return (-1); 12082 } 12083 12084 typesz = strlen(typestr) + 1; 12085 if (typesz > DTRACE_ARGTYPELEN) { 12086 dtrace_dof_error(dof, "native " 12087 "argument type too long"); 12088 return (-1); 12089 } 12090 typeidx += typesz; 12091 typestr += typesz; 12092 } 12093 12094 typeidx = probe->dofpr_xargv; 12095 typestr = strtab + probe->dofpr_xargv; 12096 for (k = 0; k < probe->dofpr_xargc; k++) { 12097 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 12098 dtrace_dof_error(dof, "bad " 12099 "native argument index"); 12100 return (-1); 12101 } 12102 12103 if (typeidx >= str_sec->dofs_size) { 12104 dtrace_dof_error(dof, "bad " 12105 "translated argument type"); 12106 return (-1); 12107 } 12108 12109 typesz = strlen(typestr) + 1; 12110 if (typesz > DTRACE_ARGTYPELEN) { 12111 dtrace_dof_error(dof, "translated argument " 12112 "type too long"); 12113 return (-1); 12114 } 12115 12116 typeidx += typesz; 12117 typestr += typesz; 12118 } 12119 } 12120 12121 return (0); 12122 } 12123 12124 static int 12125 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 12126 { 12127 dtrace_helpers_t *help; 12128 dtrace_vstate_t *vstate; 12129 dtrace_enabling_t *enab = NULL; 12130 int i, gen, rv, nhelpers = 0, destroy = 1; 12131 12132 ASSERT(MUTEX_HELD(&dtrace_lock)); 12133 12134 if ((help = curproc->p_dtrace_helpers) == NULL) 12135 help = dtrace_helpers_create(curproc); 12136 12137 vstate = &help->dthps_vstate; 12138 12139 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 12140 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 12141 dtrace_dof_destroy(dof); 12142 return (rv); 12143 } 12144 12145 /* 12146 * Now we need to walk through the ECB descriptions in the enabling. 
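 *
 * The only ECB descriptions accepted here are those that describe the
 * probe dtrace:helper:ustack. In D source, such a helper looks
 * something like the following fragment (illustrative only, not taken
 * from this file):
 *
 *	dtrace:helper:ustack:
 *	{
 *		this->result = ...;
 *	}
 *
 * Anything else in the enabling is skipped and counted against the
 * "unmatched helpers" check below.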
12147 */ 12148 for (i = 0; i < enab->dten_ndesc; i++) { 12149 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12150 dtrace_probedesc_t *desc = &ep->dted_probe; 12151 12152 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 12153 continue; 12154 12155 if (strcmp(desc->dtpd_mod, "helper") != 0) 12156 continue; 12157 12158 if (strcmp(desc->dtpd_func, "ustack") != 0) 12159 continue; 12160 12161 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 12162 ep)) != 0) { 12163 /* 12164 * Adding this helper action failed -- we are now going 12165 * to rip out the entire generation and return failure. 12166 */ 12167 (void) dtrace_helper_destroygen(help->dthps_generation); 12168 dtrace_enabling_destroy(enab); 12169 dtrace_dof_destroy(dof); 12170 dtrace_error = rv; 12171 return (-1); 12172 } 12173 12174 nhelpers++; 12175 } 12176 12177 if (nhelpers < enab->dten_ndesc) 12178 dtrace_dof_error(dof, "unmatched helpers"); 12179 12180 if (dhp != NULL) { 12181 uintptr_t daddr = (uintptr_t)dof; 12182 int err = 0; 12183 12184 /* 12185 * Look for helper probes. 12186 */ 12187 for (i = 0; i < dof->dofh_secnum; i++) { 12188 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 12189 dof->dofh_secoff + i * dof->dofh_secsize); 12190 12191 if (sec->dofs_type != DOF_SECT_PROVIDER) 12192 continue; 12193 12194 if (dtrace_helper_provider_validate(dof, sec) != 0) { 12195 err = 1; 12196 break; 12197 } 12198 } 12199 12200 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 12201 if (err == 0 && dtrace_helper_provider_add(dhp) == 0) 12202 destroy = 0; 12203 else 12204 dhp = NULL; 12205 } 12206 12207 gen = help->dthps_generation++; 12208 dtrace_enabling_destroy(enab); 12209 12210 if (dhp != NULL) { 12211 mutex_exit(&dtrace_lock); 12212 dtrace_helper_provider_register(curproc, help, dhp); 12213 mutex_enter(&dtrace_lock); 12214 } 12215 12216 if (destroy) 12217 dtrace_dof_destroy(dof); 12218 12219 return (gen); 12220 } 12221 12222 static dtrace_helpers_t * 12223 dtrace_helpers_create(proc_t *p) 12224 { 12225 dtrace_helpers_t *help; 12226 12227 ASSERT(MUTEX_HELD(&dtrace_lock)); 12228 ASSERT(p->p_dtrace_helpers == NULL); 12229 12230 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 12231 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 12232 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 12233 12234 p->p_dtrace_helpers = help; 12235 dtrace_opens++; 12236 12237 return (help); 12238 } 12239 12240 static void 12241 dtrace_helpers_destroy(void) 12242 { 12243 dtrace_helpers_t *help; 12244 dtrace_vstate_t *vstate; 12245 proc_t *p = curproc; 12246 int i; 12247 12248 mutex_enter(&dtrace_lock); 12249 12250 ASSERT(p->p_dtrace_helpers != NULL); 12251 ASSERT(dtrace_opens > 0); 12252 12253 help = p->p_dtrace_helpers; 12254 vstate = &help->dthps_vstate; 12255 12256 /* 12257 * We're now going to lose the help from this process. 12258 */ 12259 p->p_dtrace_helpers = NULL; 12260 dtrace_sync(); 12261 12262 /* 12263 * Destroy the helper actions. 12264 */ 12265 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12266 dtrace_helper_action_t *h, *next; 12267 12268 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12269 next = h->dthp_next; 12270 dtrace_helper_destroy(h, vstate); 12271 h = next; 12272 } 12273 } 12274 12275 mutex_exit(&dtrace_lock); 12276 12277 /* 12278 * Destroy the helper providers.
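 * There are two cases below:  if a meta provider has registered
 * (dtrace_meta_pid is set), each provider is removed from it
 * directly; otherwise these helpers must still be sitting on the
 * deferred list, and are unlinked from it instead.  In either case,
 * the per-provider references are then dropped via
 * dtrace_helper_provider_remove().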
12279 */ 12280 if (help->dthps_nprovs > 0) { 12281 mutex_enter(&dtrace_meta_lock); 12282 if (dtrace_meta_pid != NULL) { 12283 ASSERT(dtrace_deferred_pid == NULL); 12284 12285 for (i = 0; i < help->dthps_nprovs; i++) { 12286 dtrace_helper_remove( 12287 &help->dthps_provs[i]->dthp_prov, p->p_pid); 12288 } 12289 } else { 12290 mutex_enter(&dtrace_lock); 12291 ASSERT(dtrace_deferred_pid != NULL); 12292 12293 /* 12294 * Remove the helper from the deferred list. 12295 */ 12296 if (help->dthps_next != NULL) 12297 help->dthps_next->dthps_prev = help->dthps_prev; 12298 if (help->dthps_prev != NULL) 12299 help->dthps_prev->dthps_next = help->dthps_next; 12300 if (dtrace_deferred_pid == help) { 12301 dtrace_deferred_pid = help->dthps_next; 12302 ASSERT(help->dthps_prev == NULL); 12303 } 12304 12305 mutex_exit(&dtrace_lock); 12306 } 12307 12308 mutex_exit(&dtrace_meta_lock); 12309 12310 for (i = 0; i < help->dthps_nprovs; i++) { 12311 dtrace_helper_provider_remove(help->dthps_provs[i]); 12312 } 12313 12314 kmem_free(help->dthps_provs, help->dthps_nprovs * 12315 sizeof (dtrace_helper_provider_t *)); 12316 } 12317 12318 mutex_enter(&dtrace_lock); 12319 12320 dtrace_vstate_fini(&help->dthps_vstate); 12321 kmem_free(help->dthps_actions, 12322 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 12323 kmem_free(help, sizeof (dtrace_helpers_t)); 12324 12325 if (--dtrace_opens == 0) 12326 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 12327 12328 mutex_exit(&dtrace_lock); 12329 } 12330 12331 static void 12332 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 12333 { 12334 dtrace_helpers_t *help, *newhelp; 12335 dtrace_helper_action_t *helper, *new, *last; 12336 dtrace_difo_t *dp; 12337 dtrace_vstate_t *vstate; 12338 int i, j, sz, hasprovs = 0; 12339 12340 mutex_enter(&dtrace_lock); 12341 ASSERT(from->p_dtrace_helpers != NULL); 12342 ASSERT(dtrace_opens > 0); 12343 12344 help = from->p_dtrace_helpers; 12345 newhelp = dtrace_helpers_create(to); 12346 ASSERT(to->p_dtrace_helpers != NULL); 12347 12348 newhelp->dthps_generation = help->dthps_generation; 12349 vstate = &newhelp->dthps_vstate; 12350 12351 /* 12352 * Duplicate the helper actions. 12353 */ 12354 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12355 if ((helper = help->dthps_actions[i]) == NULL) 12356 continue; 12357 12358 for (last = NULL; helper != NULL; helper = helper->dthp_next) { 12359 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 12360 KM_SLEEP); 12361 new->dthp_generation = helper->dthp_generation; 12362 12363 if ((dp = helper->dthp_predicate) != NULL) { 12364 dp = dtrace_difo_duplicate(dp, vstate); 12365 new->dthp_predicate = dp; 12366 } 12367 12368 new->dthp_nactions = helper->dthp_nactions; 12369 sz = sizeof (dtrace_difo_t *) * new->dthp_nactions; 12370 new->dthp_actions = kmem_alloc(sz, KM_SLEEP); 12371 12372 for (j = 0; j < new->dthp_nactions; j++) { 12373 dtrace_difo_t *dp = helper->dthp_actions[j]; 12374 12375 ASSERT(dp != NULL); 12376 dp = dtrace_difo_duplicate(dp, vstate); 12377 new->dthp_actions[j] = dp; 12378 } 12379 12380 if (last != NULL) { 12381 last->dthp_next = new; 12382 } else { 12383 newhelp->dthps_actions[i] = new; 12384 } 12385 12386 last = new; 12387 } 12388 } 12389 12390 /* 12391 * Duplicate the helper providers and register them with the 12392 * DTrace framework. 
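 * Unlike the helper actions above, which are deep-copied clause by
 * clause, provider descriptions are shared between parent and child:
 * the child merely takes another reference on each
 * dtrace_helper_provider_t, and dtrace_helper_provider_remove() only
 * frees the underlying DOF once the last reference is dropped.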
12393 */ 12394 if (help->dthps_nprovs > 0) { 12395 newhelp->dthps_nprovs = help->dthps_nprovs; 12396 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 12397 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12398 for (i = 0; i < newhelp->dthps_nprovs; i++) { 12399 newhelp->dthps_provs[i] = help->dthps_provs[i]; 12400 newhelp->dthps_provs[i]->dthp_ref++; 12401 } 12402 12403 hasprovs = 1; 12404 } 12405 12406 mutex_exit(&dtrace_lock); 12407 12408 if (hasprovs) 12409 dtrace_helper_provider_register(to, newhelp, NULL); 12410 } 12411 12412 /* 12413 * DTrace Hook Functions 12414 */ 12415 static void 12416 dtrace_module_loaded(struct modctl *ctl) 12417 { 12418 dtrace_provider_t *prv; 12419 12420 mutex_enter(&dtrace_provider_lock); 12421 mutex_enter(&mod_lock); 12422 12423 ASSERT(ctl->mod_busy); 12424 12425 /* 12426 * We're going to call each provider's per-module provide operation, 12427 * specifying only this module. 12428 */ 12429 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 12430 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 12431 12432 mutex_exit(&mod_lock); 12433 mutex_exit(&dtrace_provider_lock); 12434 12435 /* 12436 * If we have any retained enablings, we need to match against them. 12437 * Enabling probes requires that cpu_lock be held, and we cannot hold 12438 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 12439 * module. (In particular, this happens when loading scheduling 12440 * classes.) So if we have any retained enablings, we need to dispatch 12441 * our task queue to do the match for us. 12442 */ 12443 mutex_enter(&dtrace_lock); 12444 12445 if (dtrace_retained == NULL) { 12446 mutex_exit(&dtrace_lock); 12447 return; 12448 } 12449 12450 (void) taskq_dispatch(dtrace_taskq, 12451 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 12452 12453 mutex_exit(&dtrace_lock); 12454 12455 /* 12456 * And now, for a little heuristic sleaze: in general, we want to 12457 * match modules as soon as they load. However, we cannot guarantee 12458 * this, because it would lead us to the lock ordering violation 12459 * outlined above. The common case, of course, is that cpu_lock is 12460 * _not_ held -- so we delay here for a clock tick, hoping that that's 12461 * long enough for the task queue to do its work. If it's not, it's 12462 * not a serious problem -- it just means that the module that we 12463 * just loaded may not be immediately instrumentable. 12464 */ 12465 delay(1); 12466 } 12467 12468 static void 12469 dtrace_module_unloaded(struct modctl *ctl) 12470 { 12471 dtrace_probe_t template, *probe, *first, *next; 12472 dtrace_provider_t *prov; 12473 12474 template.dtpr_mod = ctl->mod_modname; 12475 12476 mutex_enter(&dtrace_provider_lock); 12477 mutex_enter(&mod_lock); 12478 mutex_enter(&dtrace_lock); 12479 12480 if (dtrace_bymod == NULL) { 12481 /* 12482 * The DTrace module is loaded (obviously) but not attached; 12483 * we don't have any work to do. 12484 */ 12485 mutex_exit(&dtrace_provider_lock); 12486 mutex_exit(&mod_lock); 12487 mutex_exit(&dtrace_lock); 12488 return; 12489 } 12490 12491 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 12492 probe != NULL; probe = probe->dtpr_nextmod) { 12493 if (probe->dtpr_ecb != NULL) { 12494 mutex_exit(&dtrace_provider_lock); 12495 mutex_exit(&mod_lock); 12496 mutex_exit(&dtrace_lock); 12497 12498 /* 12499 * This shouldn't _actually_ be possible -- we're 12500 * unloading a module that has an enabled probe in it.
12501 * (It's normally up to the provider to make sure that 12502 * this can't happen.) However, because dtps_enable() 12503 * doesn't have a failure mode, there can be an 12504 * enable/unload race. Upshot: we don't want to 12505 * assert, but we're not going to disable the 12506 * probe, either. 12507 */ 12508 if (dtrace_err_verbose) { 12509 cmn_err(CE_WARN, "unloaded module '%s' had " 12510 "enabled probes", ctl->mod_modname); 12511 } 12512 12513 return; 12514 } 12515 } 12516 12517 probe = first; 12518 12519 for (first = NULL; probe != NULL; probe = next) { 12520 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 12521 12522 dtrace_probes[probe->dtpr_id - 1] = NULL; 12523 12524 next = probe->dtpr_nextmod; 12525 dtrace_hash_remove(dtrace_bymod, probe); 12526 dtrace_hash_remove(dtrace_byfunc, probe); 12527 dtrace_hash_remove(dtrace_byname, probe); 12528 12529 if (first == NULL) { 12530 first = probe; 12531 probe->dtpr_nextmod = NULL; 12532 } else { 12533 probe->dtpr_nextmod = first; 12534 first = probe; 12535 } 12536 } 12537 12538 /* 12539 * We've removed all of the module's probes from the hash chains and 12540 * from the probe array. Now issue a dtrace_sync() to be sure that 12541 * everyone has cleared out from any probe array processing. 12542 */ 12543 dtrace_sync(); 12544 12545 for (probe = first; probe != NULL; probe = first) { 12546 first = probe->dtpr_nextmod; 12547 prov = probe->dtpr_provider; 12548 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 12549 probe->dtpr_arg); 12550 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 12551 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 12552 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 12553 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 12554 kmem_free(probe, sizeof (dtrace_probe_t)); 12555 } 12556 12557 mutex_exit(&dtrace_lock); 12558 mutex_exit(&mod_lock); 12559 mutex_exit(&dtrace_provider_lock); 12560 } 12561 12562 void 12563 dtrace_suspend(void) 12564 { 12565 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 12566 } 12567 12568 void 12569 dtrace_resume(void) 12570 { 12571 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 12572 } 12573 12574 static int 12575 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 12576 { 12577 ASSERT(MUTEX_HELD(&cpu_lock)); 12578 mutex_enter(&dtrace_lock); 12579 12580 switch (what) { 12581 case CPU_CONFIG: { 12582 dtrace_state_t *state; 12583 dtrace_optval_t *opt, rs, c; 12584 12585 /* 12586 * For now, we only allocate a new buffer for anonymous state. 12587 */ 12588 if ((state = dtrace_anon.dta_state) == NULL) 12589 break; 12590 12591 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12592 break; 12593 12594 opt = state->dts_options; 12595 c = opt[DTRACEOPT_CPU]; 12596 12597 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 12598 break; 12599 12600 /* 12601 * Regardless of what the actual policy is, we're going to 12602 * temporarily set our resize policy to be manual. We're 12603 * also going to temporarily set our CPU option to denote 12604 * the newly configured CPU. 12605 */ 12606 rs = opt[DTRACEOPT_BUFRESIZE]; 12607 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 12608 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 12609 12610 (void) dtrace_state_buffers(state); 12611 12612 opt[DTRACEOPT_BUFRESIZE] = rs; 12613 opt[DTRACEOPT_CPU] = c; 12614 12615 break; 12616 } 12617 12618 case CPU_UNCONFIG: 12619 /* 12620 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 12621 * buffer will be freed when the consumer exits.) 12622 */ 12623 break; 12624 12625 default: 12626 break; 12627 } 12628 12629 mutex_exit(&dtrace_lock); 12630 return (0); 12631 } 12632 12633 static void 12634 dtrace_cpu_setup_initial(processorid_t cpu) 12635 { 12636 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 12637 } 12638 12639 static void 12640 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 12641 { 12642 if (dtrace_toxranges >= dtrace_toxranges_max) { 12643 int osize, nsize; 12644 dtrace_toxrange_t *range; 12645 12646 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 12647 12648 if (osize == 0) { 12649 ASSERT(dtrace_toxrange == NULL); 12650 ASSERT(dtrace_toxranges_max == 0); 12651 dtrace_toxranges_max = 1; 12652 } else { 12653 dtrace_toxranges_max <<= 1; 12654 } 12655 12656 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 12657 range = kmem_zalloc(nsize, KM_SLEEP); 12658 12659 if (dtrace_toxrange != NULL) { 12660 ASSERT(osize != 0); 12661 bcopy(dtrace_toxrange, range, osize); 12662 kmem_free(dtrace_toxrange, osize); 12663 } 12664 12665 dtrace_toxrange = range; 12666 } 12667 12668 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 12669 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 12670 12671 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 12672 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 12673 dtrace_toxranges++; 12674 } 12675 12676 /* 12677 * DTrace Driver Cookbook Functions 12678 */ 12679 /*ARGSUSED*/ 12680 static int 12681 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 12682 { 12683 dtrace_provider_id_t id; 12684 dtrace_state_t *state = NULL; 12685 dtrace_enabling_t *enab; 12686 12687 mutex_enter(&cpu_lock); 12688 mutex_enter(&dtrace_provider_lock); 12689 mutex_enter(&dtrace_lock); 12690 12691 if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t) + 12692 NCPU * sizeof (dtrace_buffer_t), 0) != 0) { 12693 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 12694 mutex_exit(&cpu_lock); 12695 mutex_exit(&dtrace_provider_lock); 12696 mutex_exit(&dtrace_lock); 12697 return (DDI_FAILURE); 12698 } 12699 12700 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 12701 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 12702 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 12703 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 12704 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 12705 ddi_remove_minor_node(devi, NULL); 12706 ddi_soft_state_fini(&dtrace_softstate); 12707 mutex_exit(&cpu_lock); 12708 mutex_exit(&dtrace_provider_lock); 12709 mutex_exit(&dtrace_lock); 12710 return (DDI_FAILURE); 12711 } 12712 12713 ddi_report_dev(devi); 12714 dtrace_devi = devi; 12715 12716 dtrace_modload = dtrace_module_loaded; 12717 dtrace_modunload = dtrace_module_unloaded; 12718 dtrace_cpu_init = dtrace_cpu_setup_initial; 12719 dtrace_helpers_cleanup = dtrace_helpers_destroy; 12720 dtrace_helpers_fork = dtrace_helpers_duplicate; 12721 dtrace_cpustart_init = dtrace_suspend; 12722 dtrace_cpustart_fini = dtrace_resume; 12723 dtrace_debugger_init = dtrace_suspend; 12724 dtrace_debugger_fini = dtrace_resume; 12725 dtrace_kreloc_init = dtrace_suspend; 12726 dtrace_kreloc_fini = dtrace_resume; 12727 12728 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 12729 12730 ASSERT(MUTEX_HELD(&cpu_lock)); 12731 12732 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 12733 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12734 dtrace_minor = 
vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 12735 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 12736 VM_SLEEP | VMC_IDENTIFIER); 12737 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 12738 1, INT_MAX, 0); 12739 12740 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 12741 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 12742 NULL, NULL, NULL, NULL, NULL, 0); 12743 12744 ASSERT(MUTEX_HELD(&cpu_lock)); 12745 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 12746 offsetof(dtrace_probe_t, dtpr_nextmod), 12747 offsetof(dtrace_probe_t, dtpr_prevmod)); 12748 12749 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 12750 offsetof(dtrace_probe_t, dtpr_nextfunc), 12751 offsetof(dtrace_probe_t, dtpr_prevfunc)); 12752 12753 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 12754 offsetof(dtrace_probe_t, dtpr_nextname), 12755 offsetof(dtrace_probe_t, dtpr_prevname)); 12756 12757 if (dtrace_retain_max < 1) { 12758 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 12759 "setting to 1", dtrace_retain_max); 12760 dtrace_retain_max = 1; 12761 } 12762 12763 /* 12764 * Now discover our toxic ranges. 12765 */ 12766 dtrace_toxic_ranges(dtrace_toxrange_add); 12767 12768 /* 12769 * Before we register ourselves as a provider to our own framework, 12770 * we would like to assert that dtrace_provider is NULL -- but that's 12771 * not true if we were loaded as a dependency of a DTrace provider. 12772 * Once we've registered, we can assert that dtrace_provider is our 12773 * pseudo provider. 12774 */ 12775 (void) dtrace_register("dtrace", &dtrace_provider_attr, 12776 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 12777 12778 ASSERT(dtrace_provider != NULL); 12779 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 12780 12781 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 12782 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 12783 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 12784 dtrace_provider, NULL, NULL, "END", 0, NULL); 12785 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 12786 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 12787 12788 dtrace_anon_property(); 12789 mutex_exit(&cpu_lock); 12790 12791 /* 12792 * If DTrace helper tracing is enabled, we need to allocate the 12793 * trace buffer and initialize the values. 12794 */ 12795 if (dtrace_helptrace_enabled) { 12796 ASSERT(dtrace_helptrace_buffer == NULL); 12797 dtrace_helptrace_buffer = 12798 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 12799 dtrace_helptrace_next = 0; 12800 } 12801 12802 /* 12803 * If there are already providers, we must ask them to provide their 12804 * probes, and then match any anonymous enabling against them. Note 12805 * that there should be no other retained enablings at this time: 12806 * the only retained enablings at this time should be the anonymous 12807 * enabling. 12808 */ 12809 if (dtrace_anon.dta_enabling != NULL) { 12810 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 12811 12812 dtrace_enabling_provide(NULL); 12813 state = dtrace_anon.dta_state; 12814 12815 /* 12816 * We couldn't hold cpu_lock across the above call to 12817 * dtrace_enabling_provide(), but we must hold it to actually 12818 * enable the probes. We have to drop all of our locks, pick 12819 * up cpu_lock, and regain our locks before matching the 12820 * retained anonymous enabling. 
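 * (The locks are reacquired in the same order used on entry to
 * dtrace_attach():  cpu_lock, then dtrace_provider_lock, then
 * dtrace_lock.)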
12821 */ 12822 mutex_exit(&dtrace_lock); 12823 mutex_exit(&dtrace_provider_lock); 12824 12825 mutex_enter(&cpu_lock); 12826 mutex_enter(&dtrace_provider_lock); 12827 mutex_enter(&dtrace_lock); 12828 12829 if ((enab = dtrace_anon.dta_enabling) != NULL) 12830 (void) dtrace_enabling_match(enab, NULL); 12831 12832 mutex_exit(&cpu_lock); 12833 } 12834 12835 mutex_exit(&dtrace_lock); 12836 mutex_exit(&dtrace_provider_lock); 12837 12838 if (state != NULL) { 12839 /* 12840 * If we created any anonymous state, set it going now. 12841 */ 12842 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 12843 } 12844 12845 return (DDI_SUCCESS); 12846 } 12847 12848 /*ARGSUSED*/ 12849 static int 12850 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 12851 { 12852 dtrace_state_t *state; 12853 uint32_t priv; 12854 uid_t uid; 12855 12856 if (getminor(*devp) == DTRACEMNRN_HELPER) 12857 return (0); 12858 12859 /* 12860 * If this wasn't an open with the "helper" minor, then it must be 12861 * the "dtrace" minor. 12862 */ 12863 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 12864 12865 /* 12866 * If no DTRACE_PRIV_* bits are set in the credential, then the 12867 * caller lacks sufficient permission to do anything with DTrace. 12868 */ 12869 dtrace_cred2priv(cred_p, &priv, &uid); 12870 if (priv == DTRACE_PRIV_NONE) 12871 return (EACCES); 12872 12873 /* 12874 * Ask all providers to provide all their probes. 12875 */ 12876 mutex_enter(&dtrace_provider_lock); 12877 dtrace_probe_provide(NULL, NULL); 12878 mutex_exit(&dtrace_provider_lock); 12879 12880 mutex_enter(&cpu_lock); 12881 mutex_enter(&dtrace_lock); 12882 dtrace_opens++; 12883 dtrace_membar_producer(); 12884 12885 /* 12886 * If the kernel debugger is active (that is, if the kernel debugger 12887 * modified text in some way), we won't allow the open. 12888 */ 12889 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 12890 dtrace_opens--; 12891 mutex_exit(&cpu_lock); 12892 mutex_exit(&dtrace_lock); 12893 return (EBUSY); 12894 } 12895 12896 state = dtrace_state_create(devp, cred_p); 12897 mutex_exit(&cpu_lock); 12898 12899 if (state == NULL) { 12900 if (--dtrace_opens == 0) 12901 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 12902 mutex_exit(&dtrace_lock); 12903 return (EAGAIN); 12904 } 12905 12906 mutex_exit(&dtrace_lock); 12907 12908 return (0); 12909 } 12910 12911 /*ARGSUSED*/ 12912 static int 12913 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 12914 { 12915 minor_t minor = getminor(dev); 12916 dtrace_state_t *state; 12917 12918 if (minor == DTRACEMNRN_HELPER) 12919 return (0); 12920 12921 state = ddi_get_soft_state(dtrace_softstate, minor); 12922 12923 mutex_enter(&cpu_lock); 12924 mutex_enter(&dtrace_lock); 12925 12926 if (state->dts_anon) { 12927 /* 12928 * There is anonymous state. Destroy that first. 
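 * (State can only be hanging off of dts_anon if this consumer
 * claimed the anonymous enabling, in which case dtrace_anon.dta_state
 * was cleared -- exactly what the ASSERT below verifies.)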
12929 */ 12930 ASSERT(dtrace_anon.dta_state == NULL); 12931 dtrace_state_destroy(state->dts_anon); 12932 } 12933 12934 dtrace_state_destroy(state); 12935 ASSERT(dtrace_opens > 0); 12936 if (--dtrace_opens == 0) 12937 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 12938 12939 mutex_exit(&dtrace_lock); 12940 mutex_exit(&cpu_lock); 12941 12942 return (0); 12943 } 12944 12945 /*ARGSUSED*/ 12946 static int 12947 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 12948 { 12949 int rval; 12950 dof_helper_t help, *dhp = NULL; 12951 12952 switch (cmd) { 12953 case DTRACEHIOC_ADDDOF: 12954 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 12955 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 12956 return (EFAULT); 12957 } 12958 12959 dhp = &help; 12960 arg = (intptr_t)help.dofhp_dof; 12961 /*FALLTHROUGH*/ 12962 12963 case DTRACEHIOC_ADD: { 12964 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 12965 12966 if (dof == NULL) 12967 return (rval); 12968 12969 mutex_enter(&dtrace_lock); 12970 dtrace_error = 0; 12971 12972 /* 12973 * dtrace_helper_slurp() takes responsibility for the dof -- 12974 * it may free it now or it may save it and free it later. 12975 */ 12976 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 12977 *rv = rval; 12978 rval = 0; 12979 } else { 12980 rval = EINVAL; 12981 } 12982 12983 mutex_exit(&dtrace_lock); 12984 return (rval); 12985 } 12986 12987 case DTRACEHIOC_REMOVE: { 12988 mutex_enter(&dtrace_lock); 12989 rval = dtrace_helper_destroygen(arg); 12990 mutex_exit(&dtrace_lock); 12991 12992 return (rval); 12993 } 12994 12995 default: 12996 break; 12997 } 12998 12999 return (ENOTTY); 13000 } 13001 13002 /*ARGSUSED*/ 13003 static int 13004 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 13005 { 13006 minor_t minor = getminor(dev); 13007 dtrace_state_t *state; 13008 int rval; 13009 13010 if (minor == DTRACEMNRN_HELPER) 13011 return (dtrace_ioctl_helper(cmd, arg, rv)); 13012 13013 state = ddi_get_soft_state(dtrace_softstate, minor); 13014 13015 if (state->dts_anon) { 13016 ASSERT(dtrace_anon.dta_state == NULL); 13017 state = state->dts_anon; 13018 } 13019 13020 switch (cmd) { 13021 case DTRACEIOC_PROVIDER: { 13022 dtrace_providerdesc_t pvd; 13023 dtrace_provider_t *pvp; 13024 13025 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 13026 return (EFAULT); 13027 13028 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 13029 mutex_enter(&dtrace_provider_lock); 13030 13031 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 13032 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 13033 break; 13034 } 13035 13036 mutex_exit(&dtrace_provider_lock); 13037 13038 if (pvp == NULL) 13039 return (ESRCH); 13040 13041 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 13042 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 13043 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 13044 return (EFAULT); 13045 13046 return (0); 13047 } 13048 13049 case DTRACEIOC_EPROBE: { 13050 dtrace_eprobedesc_t epdesc; 13051 dtrace_ecb_t *ecb; 13052 dtrace_action_t *act; 13053 void *buf; 13054 size_t size; 13055 uintptr_t dest; 13056 int nrecs; 13057 13058 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 13059 return (EFAULT); 13060 13061 mutex_enter(&dtrace_lock); 13062 13063 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 13064 mutex_exit(&dtrace_lock); 13065 return (EINVAL); 13066 } 13067 13068 if (ecb->dte_probe == NULL) { 13069 mutex_exit(&dtrace_lock); 13070 return (EINVAL); 13071 } 13072 13073 
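		/*
		 * Fill in the static portion of the description, then count
		 * the records a consumer will actually see:  neither
		 * aggregating actions nor members of an aggregation tuple
		 * contribute records here.
		 */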
epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 13074 epdesc.dtepd_uarg = ecb->dte_uarg; 13075 epdesc.dtepd_size = ecb->dte_size; 13076 13077 nrecs = epdesc.dtepd_nrecs; 13078 epdesc.dtepd_nrecs = 0; 13079 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13080 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13081 continue; 13082 13083 epdesc.dtepd_nrecs++; 13084 } 13085 13086 /* 13087 * Now that we have the size, we need to allocate a temporary 13088 * buffer in which to store the complete description. We need 13089 * the temporary buffer to be able to drop dtrace_lock() 13090 * across the copyout(), below. 13091 */ 13092 size = sizeof (dtrace_eprobedesc_t) + 13093 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 13094 13095 buf = kmem_alloc(size, KM_SLEEP); 13096 dest = (uintptr_t)buf; 13097 13098 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 13099 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 13100 13101 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13102 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13103 continue; 13104 13105 if (nrecs-- == 0) 13106 break; 13107 13108 bcopy(&act->dta_rec, (void *)dest, 13109 sizeof (dtrace_recdesc_t)); 13110 dest += sizeof (dtrace_recdesc_t); 13111 } 13112 13113 mutex_exit(&dtrace_lock); 13114 13115 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13116 kmem_free(buf, size); 13117 return (EFAULT); 13118 } 13119 13120 kmem_free(buf, size); 13121 return (0); 13122 } 13123 13124 case DTRACEIOC_AGGDESC: { 13125 dtrace_aggdesc_t aggdesc; 13126 dtrace_action_t *act; 13127 dtrace_aggregation_t *agg; 13128 int nrecs; 13129 uint32_t offs; 13130 dtrace_recdesc_t *lrec; 13131 void *buf; 13132 size_t size; 13133 uintptr_t dest; 13134 13135 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 13136 return (EFAULT); 13137 13138 mutex_enter(&dtrace_lock); 13139 13140 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 13141 mutex_exit(&dtrace_lock); 13142 return (EINVAL); 13143 } 13144 13145 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 13146 13147 nrecs = aggdesc.dtagd_nrecs; 13148 aggdesc.dtagd_nrecs = 0; 13149 13150 offs = agg->dtag_base; 13151 lrec = &agg->dtag_action.dta_rec; 13152 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 13153 13154 for (act = agg->dtag_first; ; act = act->dta_next) { 13155 ASSERT(act->dta_intuple || 13156 DTRACEACT_ISAGG(act->dta_kind)); 13157 aggdesc.dtagd_nrecs++; 13158 13159 if (act == &agg->dtag_action) 13160 break; 13161 } 13162 13163 /* 13164 * Now that we have the size, we need to allocate a temporary 13165 * buffer in which to store the complete description. We need 13166 * the temporary buffer to be able to drop dtrace_lock() 13167 * across the copyout(), below. 
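 * (copyout() can take a page fault and sleep for an arbitrarily
 * long time; holding dtrace_lock across it would stall every other
 * DTrace operation in the interim.)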
13168 */ 13169 size = sizeof (dtrace_aggdesc_t) + 13170 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 13171 13172 buf = kmem_alloc(size, KM_SLEEP); 13173 dest = (uintptr_t)buf; 13174 13175 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 13176 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 13177 13178 for (act = agg->dtag_first; ; act = act->dta_next) { 13179 dtrace_recdesc_t rec = act->dta_rec; 13180 13181 if (nrecs-- == 0) 13182 break; 13183 13184 rec.dtrd_offset -= offs; 13185 bcopy(&rec, (void *)dest, sizeof (rec)); 13186 dest += sizeof (dtrace_recdesc_t); 13187 13188 if (act == &agg->dtag_action) 13189 break; 13190 } 13191 13192 mutex_exit(&dtrace_lock); 13193 13194 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13195 kmem_free(buf, size); 13196 return (EFAULT); 13197 } 13198 13199 kmem_free(buf, size); 13200 return (0); 13201 } 13202 13203 case DTRACEIOC_ENABLE: { 13204 dof_hdr_t *dof; 13205 dtrace_enabling_t *enab = NULL; 13206 dtrace_vstate_t *vstate; 13207 int err = 0; 13208 13209 *rv = 0; 13210 13211 /* 13212 * If a NULL argument has been passed, we take this as our 13213 * cue to reevaluate our enablings. 13214 */ 13215 if (arg == NULL) { 13216 mutex_enter(&cpu_lock); 13217 mutex_enter(&dtrace_lock); 13218 err = dtrace_enabling_matchstate(state, rv); 13219 mutex_exit(&dtrace_lock); 13220 mutex_exit(&cpu_lock); 13221 13222 return (err); 13223 } 13224 13225 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 13226 return (rval); 13227 13228 mutex_enter(&cpu_lock); 13229 mutex_enter(&dtrace_lock); 13230 vstate = &state->dts_vstate; 13231 13232 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13233 mutex_exit(&dtrace_lock); 13234 mutex_exit(&cpu_lock); 13235 dtrace_dof_destroy(dof); 13236 return (EBUSY); 13237 } 13238 13239 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 13240 mutex_exit(&dtrace_lock); 13241 mutex_exit(&cpu_lock); 13242 dtrace_dof_destroy(dof); 13243 return (EINVAL); 13244 } 13245 13246 if ((rval = dtrace_dof_options(dof, state)) != 0) { 13247 dtrace_enabling_destroy(enab); 13248 mutex_exit(&dtrace_lock); 13249 mutex_exit(&cpu_lock); 13250 dtrace_dof_destroy(dof); 13251 return (rval); 13252 } 13253 13254 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 13255 err = dtrace_enabling_retain(enab); 13256 } else { 13257 dtrace_enabling_destroy(enab); 13258 } 13259 13260 mutex_exit(&cpu_lock); 13261 mutex_exit(&dtrace_lock); 13262 dtrace_dof_destroy(dof); 13263 13264 return (err); 13265 } 13266 13267 case DTRACEIOC_REPLICATE: { 13268 dtrace_repldesc_t desc; 13269 dtrace_probedesc_t *match = &desc.dtrpd_match; 13270 dtrace_probedesc_t *create = &desc.dtrpd_create; 13271 int err; 13272 13273 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13274 return (EFAULT); 13275 13276 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13277 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13278 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13279 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13280 13281 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13282 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13283 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13284 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13285 13286 mutex_enter(&dtrace_lock); 13287 err = dtrace_enabling_replicate(state, match, create); 13288 mutex_exit(&dtrace_lock); 13289 13290 return (err); 13291 } 13292 13293 case DTRACEIOC_PROBEMATCH: 13294 case DTRACEIOC_PROBES: { 13295 dtrace_probe_t *probe = NULL; 13296 dtrace_probedesc_t desc; 13297 dtrace_probekey_t 
pkey; 13298 dtrace_id_t i; 13299 int m = 0; 13300 uint32_t priv; 13301 uid_t uid; 13302 13303 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13304 return (EFAULT); 13305 13306 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13307 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13308 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13309 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13310 13311 /* 13312 * Before we attempt to match this probe, we want to give 13313 * all providers the opportunity to provide it. 13314 */ 13315 if (desc.dtpd_id == DTRACE_IDNONE) { 13316 mutex_enter(&dtrace_provider_lock); 13317 dtrace_probe_provide(&desc, NULL); 13318 mutex_exit(&dtrace_provider_lock); 13319 desc.dtpd_id++; 13320 } 13321 13322 if (cmd == DTRACEIOC_PROBEMATCH) { 13323 dtrace_probekey(&desc, &pkey); 13324 pkey.dtpk_id = DTRACE_IDNONE; 13325 } 13326 13327 uid = crgetuid(cr); 13328 dtrace_cred2priv(cr, &priv, &uid); 13329 13330 mutex_enter(&dtrace_lock); 13331 13332 if (cmd == DTRACEIOC_PROBEMATCH) { 13333 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13334 if ((probe = dtrace_probes[i - 1]) != NULL && 13335 (m = dtrace_match_probe(probe, &pkey, 13336 priv, uid)) != 0) 13337 break; 13338 } 13339 13340 if (m < 0) { 13341 mutex_exit(&dtrace_lock); 13342 return (EINVAL); 13343 } 13344 13345 } else { 13346 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13347 if ((probe = dtrace_probes[i - 1]) != NULL && 13348 dtrace_match_priv(probe, priv, uid)) 13349 break; 13350 } 13351 } 13352 13353 if (probe == NULL) { 13354 mutex_exit(&dtrace_lock); 13355 return (ESRCH); 13356 } 13357 13358 dtrace_probe_description(probe, &desc); 13359 mutex_exit(&dtrace_lock); 13360 13361 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13362 return (EFAULT); 13363 13364 return (0); 13365 } 13366 13367 case DTRACEIOC_PROBEARG: { 13368 dtrace_argdesc_t desc; 13369 dtrace_probe_t *probe; 13370 dtrace_provider_t *prov; 13371 13372 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13373 return (EFAULT); 13374 13375 if (desc.dtargd_id == DTRACE_IDNONE) 13376 return (EINVAL); 13377 13378 if (desc.dtargd_ndx == DTRACE_ARGNONE) 13379 return (EINVAL); 13380 13381 mutex_enter(&dtrace_provider_lock); 13382 mutex_enter(&mod_lock); 13383 mutex_enter(&dtrace_lock); 13384 13385 if (desc.dtargd_id > dtrace_nprobes) { 13386 mutex_exit(&dtrace_lock); 13387 mutex_exit(&mod_lock); 13388 mutex_exit(&dtrace_provider_lock); 13389 return (EINVAL); 13390 } 13391 13392 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 13393 mutex_exit(&dtrace_lock); 13394 mutex_exit(&mod_lock); 13395 mutex_exit(&dtrace_provider_lock); 13396 return (EINVAL); 13397 } 13398 13399 mutex_exit(&dtrace_lock); 13400 13401 prov = probe->dtpr_provider; 13402 13403 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 13404 /* 13405 * There isn't any typed information for this probe. 13406 * Set the argument number to DTRACE_ARGNONE. 
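 * Consumers interpret a returned dtargd_ndx of DTRACE_ARGNONE to
 * mean that no argument type information is available for this
 * probe.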
13407 */ 13408 desc.dtargd_ndx = DTRACE_ARGNONE; 13409 } else { 13410 desc.dtargd_native[0] = '\0'; 13411 desc.dtargd_xlate[0] = '\0'; 13412 desc.dtargd_mapping = desc.dtargd_ndx; 13413 13414 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 13415 probe->dtpr_id, probe->dtpr_arg, &desc); 13416 } 13417 13418 mutex_exit(&mod_lock); 13419 mutex_exit(&dtrace_provider_lock); 13420 13421 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13422 return (EFAULT); 13423 13424 return (0); 13425 } 13426 13427 case DTRACEIOC_GO: { 13428 processorid_t cpuid; 13429 rval = dtrace_state_go(state, &cpuid); 13430 13431 if (rval != 0) 13432 return (rval); 13433 13434 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 13435 return (EFAULT); 13436 13437 return (0); 13438 } 13439 13440 case DTRACEIOC_STOP: { 13441 processorid_t cpuid; 13442 13443 mutex_enter(&dtrace_lock); 13444 rval = dtrace_state_stop(state, &cpuid); 13445 mutex_exit(&dtrace_lock); 13446 13447 if (rval != 0) 13448 return (rval); 13449 13450 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 13451 return (EFAULT); 13452 13453 return (0); 13454 } 13455 13456 case DTRACEIOC_DOFGET: { 13457 dof_hdr_t hdr, *dof; 13458 uint64_t len; 13459 13460 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 13461 return (EFAULT); 13462 13463 mutex_enter(&dtrace_lock); 13464 dof = dtrace_dof_create(state); 13465 mutex_exit(&dtrace_lock); 13466 13467 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 13468 rval = copyout(dof, (void *)arg, len); 13469 dtrace_dof_destroy(dof); 13470 13471 return (rval == 0 ? 0 : EFAULT); 13472 } 13473 13474 case DTRACEIOC_AGGSNAP: 13475 case DTRACEIOC_BUFSNAP: { 13476 dtrace_bufdesc_t desc; 13477 caddr_t cached; 13478 dtrace_buffer_t *buf; 13479 13480 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13481 return (EFAULT); 13482 13483 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 13484 return (EINVAL); 13485 13486 mutex_enter(&dtrace_lock); 13487 13488 if (cmd == DTRACEIOC_BUFSNAP) { 13489 buf = &state->dts_buffer[desc.dtbd_cpu]; 13490 } else { 13491 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 13492 } 13493 13494 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 13495 size_t sz = buf->dtb_offset; 13496 13497 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 13498 mutex_exit(&dtrace_lock); 13499 return (EBUSY); 13500 } 13501 13502 /* 13503 * If this buffer has already been consumed, we're 13504 * going to indicate that there's nothing left here 13505 * to consume. 13506 */ 13507 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 13508 mutex_exit(&dtrace_lock); 13509 13510 desc.dtbd_size = 0; 13511 desc.dtbd_drops = 0; 13512 desc.dtbd_errors = 0; 13513 desc.dtbd_oldest = 0; 13514 sz = sizeof (desc); 13515 13516 if (copyout(&desc, (void *)arg, sz) != 0) 13517 return (EFAULT); 13518 13519 return (0); 13520 } 13521 13522 /* 13523 * If this is a ring buffer that has wrapped, we want 13524 * to copy the whole thing out. 
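 * (dtrace_buffer_polish() tidies up the region between the current
 * offset and the oldest record so that the full buffer may be
 * copied out and processed linearly.)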
13525 */ 13526 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 13527 dtrace_buffer_polish(buf); 13528 sz = buf->dtb_size; 13529 } 13530 13531 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 13532 mutex_exit(&dtrace_lock); 13533 return (EFAULT); 13534 } 13535 13536 desc.dtbd_size = sz; 13537 desc.dtbd_drops = buf->dtb_drops; 13538 desc.dtbd_errors = buf->dtb_errors; 13539 desc.dtbd_oldest = buf->dtb_xamot_offset; 13540 13541 mutex_exit(&dtrace_lock); 13542 13543 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13544 return (EFAULT); 13545 13546 buf->dtb_flags |= DTRACEBUF_CONSUMED; 13547 13548 return (0); 13549 } 13550 13551 if (buf->dtb_tomax == NULL) { 13552 ASSERT(buf->dtb_xamot == NULL); 13553 mutex_exit(&dtrace_lock); 13554 return (ENOENT); 13555 } 13556 13557 cached = buf->dtb_tomax; 13558 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 13559 13560 dtrace_xcall(desc.dtbd_cpu, 13561 (dtrace_xcall_t)dtrace_buffer_switch, buf); 13562 13563 state->dts_errors += buf->dtb_xamot_errors; 13564 13565 /* 13566 * If the buffers did not actually switch, then the cross call 13567 * did not take place -- presumably because the given CPU is 13568 * not in the ready set. If this is the case, we'll return 13569 * ENOENT. 13570 */ 13571 if (buf->dtb_tomax == cached) { 13572 ASSERT(buf->dtb_xamot != cached); 13573 mutex_exit(&dtrace_lock); 13574 return (ENOENT); 13575 } 13576 13577 ASSERT(cached == buf->dtb_xamot); 13578 13579 /* 13580 * We have our snapshot; now copy it out. 13581 */ 13582 if (copyout(buf->dtb_xamot, desc.dtbd_data, 13583 buf->dtb_xamot_offset) != 0) { 13584 mutex_exit(&dtrace_lock); 13585 return (EFAULT); 13586 } 13587 13588 desc.dtbd_size = buf->dtb_xamot_offset; 13589 desc.dtbd_drops = buf->dtb_xamot_drops; 13590 desc.dtbd_errors = buf->dtb_xamot_errors; 13591 desc.dtbd_oldest = 0; 13592 13593 mutex_exit(&dtrace_lock); 13594 13595 /* 13596 * Finally, copy out the buffer description. 13597 */ 13598 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13599 return (EFAULT); 13600 13601 return (0); 13602 } 13603 13604 case DTRACEIOC_CONF: { 13605 dtrace_conf_t conf; 13606 13607 bzero(&conf, sizeof (conf)); 13608 conf.dtc_difversion = DIF_VERSION; 13609 conf.dtc_difintregs = DIF_DIR_NREGS; 13610 conf.dtc_diftupregs = DIF_DTR_NREGS; 13611 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 13612 13613 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 13614 return (EFAULT); 13615 13616 return (0); 13617 } 13618 13619 case DTRACEIOC_STATUS: { 13620 dtrace_status_t stat; 13621 dtrace_dstate_t *dstate; 13622 int i, j; 13623 uint64_t nerrs; 13624 13625 /* 13626 * See the comment in dtrace_state_deadman() for the reason 13627 * for setting dts_laststatus to INT64_MAX before setting 13628 * it to the correct value. 
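 * (Briefly:  the store may not be atomic with respect to observers
 * on other CPUs, so INT64_MAX first marks dts_laststatus as being
 * in flux; dtrace_membar_producer() assures that the two stores
 * below are seen in order.)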
13629 */ 13630 state->dts_laststatus = INT64_MAX; 13631 dtrace_membar_producer(); 13632 state->dts_laststatus = dtrace_gethrtime(); 13633 13634 bzero(&stat, sizeof (stat)); 13635 13636 mutex_enter(&dtrace_lock); 13637 13638 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 13639 mutex_exit(&dtrace_lock); 13640 return (ENOENT); 13641 } 13642 13643 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 13644 stat.dtst_exiting = 1; 13645 13646 nerrs = state->dts_errors; 13647 dstate = &state->dts_vstate.dtvs_dynvars; 13648 13649 for (i = 0; i < NCPU; i++) { 13650 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 13651 13652 stat.dtst_dyndrops += dcpu->dtdsc_drops; 13653 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 13654 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 13655 13656 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 13657 stat.dtst_filled++; 13658 13659 nerrs += state->dts_buffer[i].dtb_errors; 13660 13661 for (j = 0; j < state->dts_nspeculations; j++) { 13662 dtrace_speculation_t *spec; 13663 dtrace_buffer_t *buf; 13664 13665 spec = &state->dts_speculations[j]; 13666 buf = &spec->dtsp_buffer[i]; 13667 stat.dtst_specdrops += buf->dtb_xamot_drops; 13668 } 13669 } 13670 13671 stat.dtst_specdrops_busy = state->dts_speculations_busy; 13672 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 13673 stat.dtst_killed = 13674 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 13675 stat.dtst_errors = nerrs; 13676 13677 mutex_exit(&dtrace_lock); 13678 13679 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 13680 return (EFAULT); 13681 13682 return (0); 13683 } 13684 13685 case DTRACEIOC_FORMAT: { 13686 dtrace_fmtdesc_t fmt; 13687 char *str; 13688 int len; 13689 13690 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 13691 return (EFAULT); 13692 13693 mutex_enter(&dtrace_lock); 13694 13695 if (fmt.dtfd_format == 0 || 13696 fmt.dtfd_format > state->dts_nformats) { 13697 mutex_exit(&dtrace_lock); 13698 return (EINVAL); 13699 } 13700 13701 /* 13702 * Format strings are allocated contiguously and they are 13703 * never freed; if a format index is less than the number 13704 * of formats, we can assert that the format map is non-NULL 13705 * and that the format for the specified index is non-NULL. 13706 */ 13707 ASSERT(state->dts_formats != NULL); 13708 str = state->dts_formats[fmt.dtfd_format - 1]; 13709 ASSERT(str != NULL); 13710 13711 len = strlen(str) + 1; 13712 13713 if (len > fmt.dtfd_length) { 13714 fmt.dtfd_length = len; 13715 13716 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 13717 mutex_exit(&dtrace_lock); 13718 return (EINVAL); 13719 } 13720 } else { 13721 if (copyout(str, fmt.dtfd_string, len) != 0) { 13722 mutex_exit(&dtrace_lock); 13723 return (EINVAL); 13724 } 13725 } 13726 13727 mutex_exit(&dtrace_lock); 13728 return (0); 13729 } 13730 13731 default: 13732 break; 13733 } 13734 13735 return (ENOTTY); 13736 } 13737 13738 /*ARGSUSED*/ 13739 static int 13740 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 13741 { 13742 dtrace_state_t *state; 13743 13744 switch (cmd) { 13745 case DDI_DETACH: 13746 break; 13747 13748 case DDI_SUSPEND: 13749 return (DDI_SUCCESS); 13750 13751 default: 13752 return (DDI_FAILURE); 13753 } 13754 13755 mutex_enter(&cpu_lock); 13756 mutex_enter(&dtrace_provider_lock); 13757 mutex_enter(&dtrace_lock); 13758 13759 if (dtrace_opens > 0) { 13760 /* 13761 * This is only possible because of DTrace helpers attached 13762 * to a process -- they count as a DTrace open. 
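 * (dtrace_helpers_create() increments dtrace_opens without any
 * device open, and dtrace_helpers_destroy() decrements it.)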
If the locking 13763 * weren't such a mess, we could assert that p_dtrace_helpers 13764 * is non-NULL for some process. 13765 */ 13766 mutex_exit(&dtrace_provider_lock); 13767 mutex_exit(&dtrace_lock); 13768 mutex_exit(&cpu_lock); 13769 return (DDI_FAILURE); 13770 } 13771 13772 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 13773 mutex_exit(&dtrace_provider_lock); 13774 mutex_exit(&dtrace_lock); 13775 mutex_exit(&cpu_lock); 13776 return (DDI_FAILURE); 13777 } 13778 13779 dtrace_provider = NULL; 13780 13781 if ((state = dtrace_anon_grab()) != NULL) { 13782 /* 13783 * If there were ECBs on this state, the provider should 13784 * have not been allowed to detach; assert that there is 13785 * none. 13786 */ 13787 ASSERT(state->dts_necbs == 0); 13788 dtrace_state_destroy(state); 13789 13790 /* 13791 * If we're being detached with anonymous state, we need to 13792 * indicate to the kernel debugger that DTrace is now inactive. 13793 */ 13794 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13795 } 13796 13797 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 13798 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13799 dtrace_cpu_init = NULL; 13800 dtrace_helpers_cleanup = NULL; 13801 dtrace_helpers_fork = NULL; 13802 dtrace_cpustart_init = NULL; 13803 dtrace_cpustart_fini = NULL; 13804 dtrace_debugger_init = NULL; 13805 dtrace_debugger_fini = NULL; 13806 dtrace_kreloc_init = NULL; 13807 dtrace_kreloc_fini = NULL; 13808 dtrace_modload = NULL; 13809 dtrace_modunload = NULL; 13810 13811 mutex_exit(&cpu_lock); 13812 13813 if (dtrace_helptrace_enabled) { 13814 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 13815 dtrace_helptrace_buffer = NULL; 13816 } 13817 13818 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 13819 dtrace_probes = NULL; 13820 dtrace_nprobes = 0; 13821 13822 dtrace_hash_destroy(dtrace_bymod); 13823 dtrace_hash_destroy(dtrace_byfunc); 13824 dtrace_hash_destroy(dtrace_byname); 13825 dtrace_bymod = NULL; 13826 dtrace_byfunc = NULL; 13827 dtrace_byname = NULL; 13828 13829 kmem_cache_destroy(dtrace_state_cache); 13830 vmem_destroy(dtrace_minor); 13831 vmem_destroy(dtrace_arena); 13832 13833 if (dtrace_toxrange != NULL) { 13834 kmem_free(dtrace_toxrange, 13835 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 13836 dtrace_toxrange = NULL; 13837 dtrace_toxranges = 0; 13838 dtrace_toxranges_max = 0; 13839 } 13840 13841 ddi_remove_minor_node(dtrace_devi, NULL); 13842 dtrace_devi = NULL; 13843 13844 ddi_soft_state_fini(&dtrace_softstate); 13845 13846 ASSERT(dtrace_vtime_references == 0); 13847 ASSERT(dtrace_opens == 0); 13848 ASSERT(dtrace_retained == NULL); 13849 13850 mutex_exit(&dtrace_lock); 13851 mutex_exit(&dtrace_provider_lock); 13852 13853 /* 13854 * We don't destroy the task queue until after we have dropped our 13855 * locks (taskq_destroy() may block on running tasks). To prevent 13856 * attempting to do work after we have effectively detached but before 13857 * the task queue has been destroyed, all tasks dispatched via the 13858 * task queue must check that DTrace is still attached before 13859 * performing any operation. 
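 * (The only such task dispatched in this file is
 * dtrace_enabling_matchall(), from dtrace_module_loaded().)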
13860 */ 13861 taskq_destroy(dtrace_taskq); 13862 dtrace_taskq = NULL; 13863 13864 return (DDI_SUCCESS); 13865 } 13866 13867 /*ARGSUSED*/ 13868 static int 13869 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 13870 { 13871 int error; 13872 13873 switch (infocmd) { 13874 case DDI_INFO_DEVT2DEVINFO: 13875 *result = (void *)dtrace_devi; 13876 error = DDI_SUCCESS; 13877 break; 13878 case DDI_INFO_DEVT2INSTANCE: 13879 *result = (void *)0; 13880 error = DDI_SUCCESS; 13881 break; 13882 default: 13883 error = DDI_FAILURE; 13884 } 13885 return (error); 13886 } 13887 13888 static struct cb_ops dtrace_cb_ops = { 13889 dtrace_open, /* open */ 13890 dtrace_close, /* close */ 13891 nulldev, /* strategy */ 13892 nulldev, /* print */ 13893 nodev, /* dump */ 13894 nodev, /* read */ 13895 nodev, /* write */ 13896 dtrace_ioctl, /* ioctl */ 13897 nodev, /* devmap */ 13898 nodev, /* mmap */ 13899 nodev, /* segmap */ 13900 nochpoll, /* poll */ 13901 ddi_prop_op, /* cb_prop_op */ 13902 0, /* streamtab */ 13903 D_NEW | D_MP /* Driver compatibility flag */ 13904 }; 13905 13906 static struct dev_ops dtrace_ops = { 13907 DEVO_REV, /* devo_rev */ 13908 0, /* refcnt */ 13909 dtrace_info, /* get_dev_info */ 13910 nulldev, /* identify */ 13911 nulldev, /* probe */ 13912 dtrace_attach, /* attach */ 13913 dtrace_detach, /* detach */ 13914 nodev, /* reset */ 13915 &dtrace_cb_ops, /* driver operations */ 13916 NULL, /* bus operations */ 13917 nodev /* dev power */ 13918 }; 13919 13920 static struct modldrv modldrv = { 13921 &mod_driverops, /* module type (this is a pseudo driver) */ 13922 "Dynamic Tracing", /* name of module */ 13923 &dtrace_ops, /* driver ops */ 13924 }; 13925 13926 static struct modlinkage modlinkage = { 13927 MODREV_1, 13928 (void *)&modldrv, 13929 NULL 13930 }; 13931 13932 int 13933 _init(void) 13934 { 13935 return (mod_install(&modlinkage)); 13936 } 13937 13938 int 13939 _info(struct modinfo *modinfop) 13940 { 13941 return (mod_info(&modlinkage, modinfop)); 13942 } 13943 13944 int 13945 _fini(void) 13946 { 13947 return (mod_remove(&modlinkage)); 13948 } 13949
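
/*
 * For orientation, an illustrative sketch (not part of the driver
 * interface):  the cb_ops above route a consumer's system calls to
 * the cookbook functions -- open(2) to dtrace_open(), ioctl(2) to
 * dtrace_ioctl() and close(2) to dtrace_close().  In terms of the
 * ioctls handled above, a minimal consumer session might look like:
 *
 *	fd = open("/dev/dtrace/dtrace", O_RDWR);
 *	ioctl(fd, DTRACEIOC_ENABLE, dof);	(enable probes from DOF)
 *	ioctl(fd, DTRACEIOC_GO, &cpuid);	(start tracing)
 *	ioctl(fd, DTRACEIOC_BUFSNAP, &desc);	(snapshot a CPU's buffer)
 *	ioctl(fd, DTRACEIOC_STOP, &cpuid);	(stop tracing)
 *	close(fd);
 *
 * Real consumers reach these ioctls through libdtrace rather than
 * issuing them directly.
 */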