/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
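 *
 * (On FreeBSD there is no /etc/system; the port instead declares a
 * "debug.dtrace" sysctl node below via SYSCTL_NODE().)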
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
#else
static struct unrhdr	*dtrace_arena;		/* Probe ID number.
 */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
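 *
 * Taken together, then, the overall acquisition order (outermost first) is:
 *
 *	dtrace_meta_lock -> cpu_lock -> dtrace_provider_lock -> mod_lock ->
 *	dtrace_lock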
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
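 * (MDB is the Solaris modular debugger, so the dcmd is not available in the
 * FreeBSD port.)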
365 */ 366 #ifdef DEBUG 367 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 368 static const char *dtrace_errlast; 369 static kthread_t *dtrace_errthread; 370 static kmutex_t dtrace_errlock; 371 #endif 372 373 /* 374 * DTrace Macros and Constants 375 * 376 * These are various macros that are useful in various spots in the 377 * implementation, along with a few random constants that have no meaning 378 * outside of the implementation. There is no real structure to this cpp 379 * mishmash -- but is there ever? 380 */ 381 #define DTRACE_HASHSTR(hash, probe) \ 382 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 383 384 #define DTRACE_HASHNEXT(hash, probe) \ 385 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 386 387 #define DTRACE_HASHPREV(hash, probe) \ 388 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 389 390 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 391 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 392 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 393 394 #define DTRACE_AGGHASHSIZE_SLEW 17 395 396 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 397 398 /* 399 * The key for a thread-local variable consists of the lower 61 bits of the 400 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 401 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 402 * equal to a variable identifier. This is necessary (but not sufficient) to 403 * assure that global associative arrays never collide with thread-local 404 * variables. To guarantee that they cannot collide, we must also define the 405 * order for keying dynamic variables. That order is: 406 * 407 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 408 * 409 * Because the variable-key and the tls-key are in orthogonal spaces, there is 410 * no way for a global variable key signature to match a thread-local key 411 * signature. 412 */ 413 #if defined(sun) 414 #define DTRACE_TLS_THRKEY(where) { \ 415 uint_t intr = 0; \ 416 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 417 for (; actv; actv >>= 1) \ 418 intr++; \ 419 ASSERT(intr < (1 << 3)); \ 420 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 421 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 422 } 423 #else 424 #define DTRACE_TLS_THRKEY(where) { \ 425 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \ 426 uint_t intr = 0; \ 427 uint_t actv = _c->cpu_intr_actv; \ 428 for (; actv; actv >>= 1) \ 429 intr++; \ 430 ASSERT(intr < (1 << 3)); \ 431 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \ 432 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 433 } 434 #endif 435 436 #define DT_BSWAP_8(x) ((x) & 0xff) 437 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 438 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 439 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 440 441 #define DT_MASK_LO 0x00000000FFFFFFFFULL 442 443 #define DTRACE_STORE(type, tomax, offset, what) \ 444 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 445 446 #ifndef __i386 447 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 448 if (addr & (size - 1)) { \ 449 *flags |= CPU_DTRACE_BADALIGN; \ 450 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 451 return (0); \ 452 } 453 #else 454 #define DTRACE_ALIGNCHECK(addr, size, flags) 455 #endif 456 457 /* 458 * Test whether a range of memory starting at testaddr of size testsz falls 459 * within the range of memory described by addr, sz. 
We take care to avoid 460 * problems with overflow and underflow of the unsigned quantities, and 461 * disallow all negative sizes. Ranges of size 0 are allowed. 462 */ 463 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 464 ((testaddr) - (baseaddr) < (basesz) && \ 465 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \ 466 (testaddr) + (testsz) >= (testaddr)) 467 468 /* 469 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 470 * alloc_sz on the righthand side of the comparison in order to avoid overflow 471 * or underflow in the comparison with it. This is simpler than the INRANGE 472 * check above, because we know that the dtms_scratch_ptr is valid in the 473 * range. Allocations of size zero are allowed. 474 */ 475 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 476 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 477 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 478 479 #define DTRACE_LOADFUNC(bits) \ 480 /*CSTYLED*/ \ 481 uint##bits##_t \ 482 dtrace_load##bits(uintptr_t addr) \ 483 { \ 484 size_t size = bits / NBBY; \ 485 /*CSTYLED*/ \ 486 uint##bits##_t rval; \ 487 int i; \ 488 volatile uint16_t *flags = (volatile uint16_t *) \ 489 &cpu_core[curcpu].cpuc_dtrace_flags; \ 490 \ 491 DTRACE_ALIGNCHECK(addr, size, flags); \ 492 \ 493 for (i = 0; i < dtrace_toxranges; i++) { \ 494 if (addr >= dtrace_toxrange[i].dtt_limit) \ 495 continue; \ 496 \ 497 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 498 continue; \ 499 \ 500 /* \ 501 * This address falls within a toxic region; return 0. \ 502 */ \ 503 *flags |= CPU_DTRACE_BADADDR; \ 504 cpu_core[curcpu].cpuc_dtrace_illval = addr; \ 505 return (0); \ 506 } \ 507 \ 508 *flags |= CPU_DTRACE_NOFAULT; \ 509 /*CSTYLED*/ \ 510 rval = *((volatile uint##bits##_t *)addr); \ 511 *flags &= ~CPU_DTRACE_NOFAULT; \ 512 \ 513 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 514 } 515 516 #ifdef _LP64 517 #define dtrace_loadptr dtrace_load64 518 #else 519 #define dtrace_loadptr dtrace_load32 520 #endif 521 522 #define DTRACE_DYNHASH_FREE 0 523 #define DTRACE_DYNHASH_SINK 1 524 #define DTRACE_DYNHASH_VALID 2 525 526 #define DTRACE_MATCH_NEXT 0 527 #define DTRACE_MATCH_DONE 1 528 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 529 #define DTRACE_STATE_ALIGN 64 530 531 #define DTRACE_FLAGS2FLT(flags) \ 532 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 533 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 534 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 535 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 536 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 537 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 538 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 539 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 540 ((flags) & CPU_DTRACE_BADSTACK) ? 
DTRACEFLT_BADSTACK :						\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)					\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&		\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.
If these counters 622 * are incremented concurrently on different CPUs, scalability will be 623 * adversely affected -- but we don't expect them to be white-hot in a 624 * correctly constructed enabling... 625 */ 626 uint32_t oval, nval; 627 628 do { 629 oval = *counter; 630 631 if ((nval = oval + 1) == 0) { 632 /* 633 * If the counter would wrap, set it to 1 -- assuring 634 * that the counter is never zero when we have seen 635 * errors. (The counter must be 32-bits because we 636 * aren't guaranteed a 64-bit compare&swap operation.) 637 * To save this code both the infamy of being fingered 638 * by a priggish news story and the indignity of being 639 * the target of a neo-puritan witch trial, we're 640 * carefully avoiding any colorful description of the 641 * likelihood of this condition -- but suffice it to 642 * say that it is only slightly more likely than the 643 * overflow of predicate cache IDs, as discussed in 644 * dtrace_predicate_create(). 645 */ 646 nval = 1; 647 } 648 } while (dtrace_cas32(counter, oval, nval) != oval); 649 } 650 651 /* 652 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 653 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 654 */ 655 DTRACE_LOADFUNC(8) 656 DTRACE_LOADFUNC(16) 657 DTRACE_LOADFUNC(32) 658 DTRACE_LOADFUNC(64) 659 660 static int 661 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 662 { 663 if (dest < mstate->dtms_scratch_base) 664 return (0); 665 666 if (dest + size < dest) 667 return (0); 668 669 if (dest + size > mstate->dtms_scratch_ptr) 670 return (0); 671 672 return (1); 673 } 674 675 static int 676 dtrace_canstore_statvar(uint64_t addr, size_t sz, 677 dtrace_statvar_t **svars, int nsvars) 678 { 679 int i; 680 681 for (i = 0; i < nsvars; i++) { 682 dtrace_statvar_t *svar = svars[i]; 683 684 if (svar == NULL || svar->dtsv_size == 0) 685 continue; 686 687 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 688 return (1); 689 } 690 691 return (0); 692 } 693 694 /* 695 * Check to see if the address is within a memory region to which a store may 696 * be issued. This includes the DTrace scratch areas, and any DTrace variable 697 * region. The caller of dtrace_canstore() is responsible for performing any 698 * alignment checks that are needed before stores are actually executed. 699 */ 700 static int 701 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 702 dtrace_vstate_t *vstate) 703 { 704 /* 705 * First, check to see if the address is in scratch space... 706 */ 707 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 708 mstate->dtms_scratch_size)) 709 return (1); 710 711 /* 712 * Now check to see if it's a dynamic variable. This check will pick 713 * up both thread-local variables and any global dynamically-allocated 714 * variables. 715 */ 716 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 717 vstate->dtvs_dynvars.dtds_size)) { 718 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 719 uintptr_t base = (uintptr_t)dstate->dtds_base + 720 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 721 uintptr_t chunkoffs; 722 723 /* 724 * Before we assume that we can store here, we need to make 725 * sure that it isn't in our metadata -- storing to our 726 * dynamic variable metadata would corrupt our state. 
For 727 * the range to not include any dynamic variable metadata, 728 * it must: 729 * 730 * (1) Start above the hash table that is at the base of 731 * the dynamic variable space 732 * 733 * (2) Have a starting chunk offset that is beyond the 734 * dtrace_dynvar_t that is at the base of every chunk 735 * 736 * (3) Not span a chunk boundary 737 * 738 */ 739 if (addr < base) 740 return (0); 741 742 chunkoffs = (addr - base) % dstate->dtds_chunksize; 743 744 if (chunkoffs < sizeof (dtrace_dynvar_t)) 745 return (0); 746 747 if (chunkoffs + sz > dstate->dtds_chunksize) 748 return (0); 749 750 return (1); 751 } 752 753 /* 754 * Finally, check the static local and global variables. These checks 755 * take the longest, so we perform them last. 756 */ 757 if (dtrace_canstore_statvar(addr, sz, 758 vstate->dtvs_locals, vstate->dtvs_nlocals)) 759 return (1); 760 761 if (dtrace_canstore_statvar(addr, sz, 762 vstate->dtvs_globals, vstate->dtvs_nglobals)) 763 return (1); 764 765 return (0); 766 } 767 768 769 /* 770 * Convenience routine to check to see if the address is within a memory 771 * region in which a load may be issued given the user's privilege level; 772 * if not, it sets the appropriate error flags and loads 'addr' into the 773 * illegal value slot. 774 * 775 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 776 * appropriate memory access protection. 777 */ 778 static int 779 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 780 dtrace_vstate_t *vstate) 781 { 782 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 783 784 /* 785 * If we hold the privilege to read from kernel memory, then 786 * everything is readable. 787 */ 788 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 789 return (1); 790 791 /* 792 * You can obviously read that which you can store. 793 */ 794 if (dtrace_canstore(addr, sz, mstate, vstate)) 795 return (1); 796 797 /* 798 * We're allowed to read from our own string table. 799 */ 800 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 801 mstate->dtms_difo->dtdo_strlen)) 802 return (1); 803 804 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 805 *illval = addr; 806 return (0); 807 } 808 809 /* 810 * Convenience routine to check to see if a given string is within a memory 811 * region in which a load may be issued given the user's privilege level; 812 * this exists so that we don't need to issue unnecessary dtrace_strlen() 813 * calls in the event that the user has all privileges. 814 */ 815 static int 816 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 817 dtrace_vstate_t *vstate) 818 { 819 size_t strsz; 820 821 /* 822 * If we hold the privilege to read from kernel memory, then 823 * everything is readable. 824 */ 825 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 826 return (1); 827 828 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 829 if (dtrace_canload(addr, strsz, mstate, vstate)) 830 return (1); 831 832 return (0); 833 } 834 835 /* 836 * Convenience routine to check to see if a given variable is within a memory 837 * region in which a load may be issued given the user's privilege level. 838 */ 839 static int 840 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 841 dtrace_vstate_t *vstate) 842 { 843 size_t sz; 844 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 845 846 /* 847 * If we hold the privilege to read from kernel memory, then 848 * everything is readable. 
849 */ 850 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 851 return (1); 852 853 if (type->dtdt_kind == DIF_TYPE_STRING) 854 sz = dtrace_strlen(src, 855 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 856 else 857 sz = type->dtdt_size; 858 859 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 860 } 861 862 /* 863 * Compare two strings using safe loads. 864 */ 865 static int 866 dtrace_strncmp(char *s1, char *s2, size_t limit) 867 { 868 uint8_t c1, c2; 869 volatile uint16_t *flags; 870 871 if (s1 == s2 || limit == 0) 872 return (0); 873 874 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 875 876 do { 877 if (s1 == NULL) { 878 c1 = '\0'; 879 } else { 880 c1 = dtrace_load8((uintptr_t)s1++); 881 } 882 883 if (s2 == NULL) { 884 c2 = '\0'; 885 } else { 886 c2 = dtrace_load8((uintptr_t)s2++); 887 } 888 889 if (c1 != c2) 890 return (c1 - c2); 891 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 892 893 return (0); 894 } 895 896 /* 897 * Compute strlen(s) for a string using safe memory accesses. The additional 898 * len parameter is used to specify a maximum length to ensure completion. 899 */ 900 static size_t 901 dtrace_strlen(const char *s, size_t lim) 902 { 903 uint_t len; 904 905 for (len = 0; len != lim; len++) { 906 if (dtrace_load8((uintptr_t)s++) == '\0') 907 break; 908 } 909 910 return (len); 911 } 912 913 /* 914 * Check if an address falls within a toxic region. 915 */ 916 static int 917 dtrace_istoxic(uintptr_t kaddr, size_t size) 918 { 919 uintptr_t taddr, tsize; 920 int i; 921 922 for (i = 0; i < dtrace_toxranges; i++) { 923 taddr = dtrace_toxrange[i].dtt_base; 924 tsize = dtrace_toxrange[i].dtt_limit - taddr; 925 926 if (kaddr - taddr < tsize) { 927 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 928 cpu_core[curcpu].cpuc_dtrace_illval = kaddr; 929 return (1); 930 } 931 932 if (taddr - kaddr < size) { 933 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 934 cpu_core[curcpu].cpuc_dtrace_illval = taddr; 935 return (1); 936 } 937 } 938 939 return (0); 940 } 941 942 /* 943 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 944 * memory specified by the DIF program. The dst is assumed to be safe memory 945 * that we can store to directly because it is managed by DTrace. As with 946 * standard bcopy, overlapping copies are handled properly. 947 */ 948 static void 949 dtrace_bcopy(const void *src, void *dst, size_t len) 950 { 951 if (len != 0) { 952 uint8_t *s1 = dst; 953 const uint8_t *s2 = src; 954 955 if (s1 <= s2) { 956 do { 957 *s1++ = dtrace_load8((uintptr_t)s2++); 958 } while (--len != 0); 959 } else { 960 s2 += len; 961 s1 += len; 962 963 do { 964 *--s1 = dtrace_load8((uintptr_t)--s2); 965 } while (--len != 0); 966 } 967 } 968 } 969 970 /* 971 * Copy src to dst using safe memory accesses, up to either the specified 972 * length, or the point that a nul byte is encountered. The src is assumed to 973 * be unsafe memory specified by the DIF program. The dst is assumed to be 974 * safe memory that we can store to directly because it is managed by DTrace. 975 * Unlike dtrace_bcopy(), overlapping regions are not handled. 976 */ 977 static void 978 dtrace_strcpy(const void *src, void *dst, size_t len) 979 { 980 if (len != 0) { 981 uint8_t *s1 = dst, c; 982 const uint8_t *s2 = src; 983 984 do { 985 *s1++ = c = dtrace_load8((uintptr_t)s2++); 986 } while (--len != 0 && c != '\0'); 987 } 988 } 989 990 /* 991 * Copy src to dst, deriving the size and type from the specified (BYREF) 992 * variable type. 
The src is assumed to be unsafe memory specified by the DIF 993 * program. The dst is assumed to be DTrace variable memory that is of the 994 * specified type; we assume that we can store to directly. 995 */ 996 static void 997 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 998 { 999 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1000 1001 if (type->dtdt_kind == DIF_TYPE_STRING) { 1002 dtrace_strcpy(src, dst, type->dtdt_size); 1003 } else { 1004 dtrace_bcopy(src, dst, type->dtdt_size); 1005 } 1006 } 1007 1008 /* 1009 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1010 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1011 * safe memory that we can access directly because it is managed by DTrace. 1012 */ 1013 static int 1014 dtrace_bcmp(const void *s1, const void *s2, size_t len) 1015 { 1016 volatile uint16_t *flags; 1017 1018 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 1019 1020 if (s1 == s2) 1021 return (0); 1022 1023 if (s1 == NULL || s2 == NULL) 1024 return (1); 1025 1026 if (s1 != s2 && len != 0) { 1027 const uint8_t *ps1 = s1; 1028 const uint8_t *ps2 = s2; 1029 1030 do { 1031 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1032 return (1); 1033 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1034 } 1035 return (0); 1036 } 1037 1038 /* 1039 * Zero the specified region using a simple byte-by-byte loop. Note that this 1040 * is for safe DTrace-managed memory only. 1041 */ 1042 static void 1043 dtrace_bzero(void *dst, size_t len) 1044 { 1045 uchar_t *cp; 1046 1047 for (cp = dst; len != 0; len--) 1048 *cp++ = 0; 1049 } 1050 1051 static void 1052 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1053 { 1054 uint64_t result[2]; 1055 1056 result[0] = addend1[0] + addend2[0]; 1057 result[1] = addend1[1] + addend2[1] + 1058 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1059 1060 sum[0] = result[0]; 1061 sum[1] = result[1]; 1062 } 1063 1064 /* 1065 * Shift the 128-bit value in a by b. If b is positive, shift left. 1066 * If b is negative, shift right. 1067 */ 1068 static void 1069 dtrace_shift_128(uint64_t *a, int b) 1070 { 1071 uint64_t mask; 1072 1073 if (b == 0) 1074 return; 1075 1076 if (b < 0) { 1077 b = -b; 1078 if (b >= 64) { 1079 a[0] = a[1] >> (b - 64); 1080 a[1] = 0; 1081 } else { 1082 a[0] >>= b; 1083 mask = 1LL << (64 - b); 1084 mask -= 1; 1085 a[0] |= ((a[1] & mask) << (64 - b)); 1086 a[1] >>= b; 1087 } 1088 } else { 1089 if (b >= 64) { 1090 a[1] = a[0] << (b - 64); 1091 a[0] = 0; 1092 } else { 1093 a[1] <<= b; 1094 mask = a[0] >> (64 - b); 1095 a[1] |= mask; 1096 a[0] <<= b; 1097 } 1098 } 1099 } 1100 1101 /* 1102 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1103 * use native multiplication on those, and then re-combine into the 1104 * resulting 128-bit value. 
1105 * 1106 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1107 * hi1 * hi2 << 64 + 1108 * hi1 * lo2 << 32 + 1109 * hi2 * lo1 << 32 + 1110 * lo1 * lo2 1111 */ 1112 static void 1113 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1114 { 1115 uint64_t hi1, hi2, lo1, lo2; 1116 uint64_t tmp[2]; 1117 1118 hi1 = factor1 >> 32; 1119 hi2 = factor2 >> 32; 1120 1121 lo1 = factor1 & DT_MASK_LO; 1122 lo2 = factor2 & DT_MASK_LO; 1123 1124 product[0] = lo1 * lo2; 1125 product[1] = hi1 * hi2; 1126 1127 tmp[0] = hi1 * lo2; 1128 tmp[1] = 0; 1129 dtrace_shift_128(tmp, 32); 1130 dtrace_add_128(product, tmp, product); 1131 1132 tmp[0] = hi2 * lo1; 1133 tmp[1] = 0; 1134 dtrace_shift_128(tmp, 32); 1135 dtrace_add_128(product, tmp, product); 1136 } 1137 1138 /* 1139 * This privilege check should be used by actions and subroutines to 1140 * verify that the user credentials of the process that enabled the 1141 * invoking ECB match the target credentials 1142 */ 1143 static int 1144 dtrace_priv_proc_common_user(dtrace_state_t *state) 1145 { 1146 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1147 1148 /* 1149 * We should always have a non-NULL state cred here, since if cred 1150 * is null (anonymous tracing), we fast-path bypass this routine. 1151 */ 1152 ASSERT(s_cr != NULL); 1153 1154 if ((cr = CRED()) != NULL && 1155 s_cr->cr_uid == cr->cr_uid && 1156 s_cr->cr_uid == cr->cr_ruid && 1157 s_cr->cr_uid == cr->cr_suid && 1158 s_cr->cr_gid == cr->cr_gid && 1159 s_cr->cr_gid == cr->cr_rgid && 1160 s_cr->cr_gid == cr->cr_sgid) 1161 return (1); 1162 1163 return (0); 1164 } 1165 1166 /* 1167 * This privilege check should be used by actions and subroutines to 1168 * verify that the zone of the process that enabled the invoking ECB 1169 * matches the target credentials 1170 */ 1171 static int 1172 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1173 { 1174 #if defined(sun) 1175 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1176 1177 /* 1178 * We should always have a non-NULL state cred here, since if cred 1179 * is null (anonymous tracing), we fast-path bypass this routine. 1180 */ 1181 ASSERT(s_cr != NULL); 1182 1183 if ((cr = CRED()) != NULL && 1184 s_cr->cr_zone == cr->cr_zone) 1185 return (1); 1186 1187 return (0); 1188 #else 1189 return (1); 1190 #endif 1191 } 1192 1193 /* 1194 * This privilege check should be used by actions and subroutines to 1195 * verify that the process has not setuid or changed credentials. 
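 * (This is detected via the per-process SNOCD flag; the FreeBSD
 * compatibility shims above define SNOCD to 0, so the flag check is
 * effectively a no-op in the FreeBSD port.)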
1196 */ 1197 static int 1198 dtrace_priv_proc_common_nocd(void) 1199 { 1200 proc_t *proc; 1201 1202 if ((proc = ttoproc(curthread)) != NULL && 1203 !(proc->p_flag & SNOCD)) 1204 return (1); 1205 1206 return (0); 1207 } 1208 1209 static int 1210 dtrace_priv_proc_destructive(dtrace_state_t *state) 1211 { 1212 int action = state->dts_cred.dcr_action; 1213 1214 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1215 dtrace_priv_proc_common_zone(state) == 0) 1216 goto bad; 1217 1218 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1219 dtrace_priv_proc_common_user(state) == 0) 1220 goto bad; 1221 1222 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1223 dtrace_priv_proc_common_nocd() == 0) 1224 goto bad; 1225 1226 return (1); 1227 1228 bad: 1229 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1230 1231 return (0); 1232 } 1233 1234 static int 1235 dtrace_priv_proc_control(dtrace_state_t *state) 1236 { 1237 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1238 return (1); 1239 1240 if (dtrace_priv_proc_common_zone(state) && 1241 dtrace_priv_proc_common_user(state) && 1242 dtrace_priv_proc_common_nocd()) 1243 return (1); 1244 1245 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1246 1247 return (0); 1248 } 1249 1250 static int 1251 dtrace_priv_proc(dtrace_state_t *state) 1252 { 1253 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1254 return (1); 1255 1256 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1257 1258 return (0); 1259 } 1260 1261 static int 1262 dtrace_priv_kernel(dtrace_state_t *state) 1263 { 1264 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1265 return (1); 1266 1267 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1268 1269 return (0); 1270 } 1271 1272 static int 1273 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1274 { 1275 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1276 return (1); 1277 1278 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1279 1280 return (0); 1281 } 1282 1283 /* 1284 * Note: not called from probe context. This function is called 1285 * asynchronously (and at a regular interval) from outside of probe context to 1286 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1287 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1288 */ 1289 void 1290 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1291 { 1292 dtrace_dynvar_t *dirty; 1293 dtrace_dstate_percpu_t *dcpu; 1294 int i, work = 0; 1295 1296 for (i = 0; i < NCPU; i++) { 1297 dcpu = &dstate->dtds_percpu[i]; 1298 1299 ASSERT(dcpu->dtdsc_rinsing == NULL); 1300 1301 /* 1302 * If the dirty list is NULL, there is no dirty work to do. 1303 */ 1304 if (dcpu->dtdsc_dirty == NULL) 1305 continue; 1306 1307 /* 1308 * If the clean list is non-NULL, then we're not going to do 1309 * any work for this CPU -- it means that there has not been 1310 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 1311 * since the last time we cleaned house. 1312 */ 1313 if (dcpu->dtdsc_clean != NULL) 1314 continue; 1315 1316 work = 1; 1317 1318 /* 1319 * Atomically move the dirty list aside. 1320 */ 1321 do { 1322 dirty = dcpu->dtdsc_dirty; 1323 1324 /* 1325 * Before we zap the dirty list, set the rinsing list. 1326 * (This allows for a potential assertion in 1327 * dtrace_dynvar(): if a free dynamic variable appears 1328 * on a hash chain, either the dirty list or the 1329 * rinsing list for some CPU must be non-NULL.) 
1330 */ 1331 dcpu->dtdsc_rinsing = dirty; 1332 dtrace_membar_producer(); 1333 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1334 dirty, NULL) != dirty); 1335 } 1336 1337 if (!work) { 1338 /* 1339 * We have no work to do; we can simply return. 1340 */ 1341 return; 1342 } 1343 1344 dtrace_sync(); 1345 1346 for (i = 0; i < NCPU; i++) { 1347 dcpu = &dstate->dtds_percpu[i]; 1348 1349 if (dcpu->dtdsc_rinsing == NULL) 1350 continue; 1351 1352 /* 1353 * We are now guaranteed that no hash chain contains a pointer 1354 * into this dirty list; we can make it clean. 1355 */ 1356 ASSERT(dcpu->dtdsc_clean == NULL); 1357 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1358 dcpu->dtdsc_rinsing = NULL; 1359 } 1360 1361 /* 1362 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1363 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1364 * This prevents a race whereby a CPU incorrectly decides that 1365 * the state should be something other than DTRACE_DSTATE_CLEAN 1366 * after dtrace_dynvar_clean() has completed. 1367 */ 1368 dtrace_sync(); 1369 1370 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1371 } 1372 1373 /* 1374 * Depending on the value of the op parameter, this function looks-up, 1375 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1376 * allocation is requested, this function will return a pointer to a 1377 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1378 * variable can be allocated. If NULL is returned, the appropriate counter 1379 * will be incremented. 1380 */ 1381 dtrace_dynvar_t * 1382 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1383 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1384 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1385 { 1386 uint64_t hashval = DTRACE_DYNHASH_VALID; 1387 dtrace_dynhash_t *hash = dstate->dtds_hash; 1388 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1389 processorid_t me = curcpu, cpu = me; 1390 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1391 size_t bucket, ksize; 1392 size_t chunksize = dstate->dtds_chunksize; 1393 uintptr_t kdata, lock, nstate; 1394 uint_t i; 1395 1396 ASSERT(nkeys != 0); 1397 1398 /* 1399 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1400 * algorithm. For the by-value portions, we perform the algorithm in 1401 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1402 * bit, and seems to have only a minute effect on distribution. For 1403 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1404 * over each referenced byte. It's painful to do this, but it's much 1405 * better than pathological hash distribution. The efficacy of the 1406 * hashing algorithm (and a comparison with other algorithms) may be 1407 * found by running the ::dtrace_dynstat MDB dcmd. 1408 */ 1409 for (i = 0; i < nkeys; i++) { 1410 if (key[i].dttk_size == 0) { 1411 uint64_t val = key[i].dttk_value; 1412 1413 hashval += (val >> 48) & 0xffff; 1414 hashval += (hashval << 10); 1415 hashval ^= (hashval >> 6); 1416 1417 hashval += (val >> 32) & 0xffff; 1418 hashval += (hashval << 10); 1419 hashval ^= (hashval >> 6); 1420 1421 hashval += (val >> 16) & 0xffff; 1422 hashval += (hashval << 10); 1423 hashval ^= (hashval >> 6); 1424 1425 hashval += val & 0xffff; 1426 hashval += (hashval << 10); 1427 hashval ^= (hashval >> 6); 1428 } else { 1429 /* 1430 * This is incredibly painful, but it beats the hell 1431 * out of the alternative. 
1432 */ 1433 uint64_t j, size = key[i].dttk_size; 1434 uintptr_t base = (uintptr_t)key[i].dttk_value; 1435 1436 if (!dtrace_canload(base, size, mstate, vstate)) 1437 break; 1438 1439 for (j = 0; j < size; j++) { 1440 hashval += dtrace_load8(base + j); 1441 hashval += (hashval << 10); 1442 hashval ^= (hashval >> 6); 1443 } 1444 } 1445 } 1446 1447 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1448 return (NULL); 1449 1450 hashval += (hashval << 3); 1451 hashval ^= (hashval >> 11); 1452 hashval += (hashval << 15); 1453 1454 /* 1455 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1456 * comes out to be one of our two sentinel hash values. If this 1457 * actually happens, we set the hashval to be a value known to be a 1458 * non-sentinel value. 1459 */ 1460 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1461 hashval = DTRACE_DYNHASH_VALID; 1462 1463 /* 1464 * Yes, it's painful to do a divide here. If the cycle count becomes 1465 * important here, tricks can be pulled to reduce it. (However, it's 1466 * critical that hash collisions be kept to an absolute minimum; 1467 * they're much more painful than a divide.) It's better to have a 1468 * solution that generates few collisions and still keeps things 1469 * relatively simple. 1470 */ 1471 bucket = hashval % dstate->dtds_hashsize; 1472 1473 if (op == DTRACE_DYNVAR_DEALLOC) { 1474 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1475 1476 for (;;) { 1477 while ((lock = *lockp) & 1) 1478 continue; 1479 1480 if (dtrace_casptr((volatile void *)lockp, 1481 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1482 break; 1483 } 1484 1485 dtrace_membar_producer(); 1486 } 1487 1488 top: 1489 prev = NULL; 1490 lock = hash[bucket].dtdh_lock; 1491 1492 dtrace_membar_consumer(); 1493 1494 start = hash[bucket].dtdh_chain; 1495 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1496 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1497 op != DTRACE_DYNVAR_DEALLOC)); 1498 1499 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1500 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1501 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1502 1503 if (dvar->dtdv_hashval != hashval) { 1504 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1505 /* 1506 * We've reached the sink, and therefore the 1507 * end of the hash chain; we can kick out of 1508 * the loop knowing that we have seen a valid 1509 * snapshot of state. 1510 */ 1511 ASSERT(dvar->dtdv_next == NULL); 1512 ASSERT(dvar == &dtrace_dynhash_sink); 1513 break; 1514 } 1515 1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1517 /* 1518 * We've gone off the rails: somewhere along 1519 * the line, one of the members of this hash 1520 * chain was deleted. Note that we could also 1521 * detect this by simply letting this loop run 1522 * to completion, as we would eventually hit 1523 * the end of the dirty list. However, we 1524 * want to avoid running the length of the 1525 * dirty list unnecessarily (it might be quite 1526 * long), so we catch this as early as 1527 * possible by detecting the hash marker. In 1528 * this case, we simply set dvar to NULL and 1529 * break; the conditional after the loop will 1530 * send us back to top. 
1531 */ 1532 dvar = NULL; 1533 break; 1534 } 1535 1536 goto next; 1537 } 1538 1539 if (dtuple->dtt_nkeys != nkeys) 1540 goto next; 1541 1542 for (i = 0; i < nkeys; i++, dkey++) { 1543 if (dkey->dttk_size != key[i].dttk_size) 1544 goto next; /* size or type mismatch */ 1545 1546 if (dkey->dttk_size != 0) { 1547 if (dtrace_bcmp( 1548 (void *)(uintptr_t)key[i].dttk_value, 1549 (void *)(uintptr_t)dkey->dttk_value, 1550 dkey->dttk_size)) 1551 goto next; 1552 } else { 1553 if (dkey->dttk_value != key[i].dttk_value) 1554 goto next; 1555 } 1556 } 1557 1558 if (op != DTRACE_DYNVAR_DEALLOC) 1559 return (dvar); 1560 1561 ASSERT(dvar->dtdv_next == NULL || 1562 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1563 1564 if (prev != NULL) { 1565 ASSERT(hash[bucket].dtdh_chain != dvar); 1566 ASSERT(start != dvar); 1567 ASSERT(prev->dtdv_next == dvar); 1568 prev->dtdv_next = dvar->dtdv_next; 1569 } else { 1570 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1571 start, dvar->dtdv_next) != start) { 1572 /* 1573 * We have failed to atomically swing the 1574 * hash table head pointer, presumably because 1575 * of a conflicting allocation on another CPU. 1576 * We need to reread the hash chain and try 1577 * again. 1578 */ 1579 goto top; 1580 } 1581 } 1582 1583 dtrace_membar_producer(); 1584 1585 /* 1586 * Now set the hash value to indicate that it's free. 1587 */ 1588 ASSERT(hash[bucket].dtdh_chain != dvar); 1589 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1590 1591 dtrace_membar_producer(); 1592 1593 /* 1594 * Set the next pointer to point at the dirty list, and 1595 * atomically swing the dirty pointer to the newly freed dvar. 1596 */ 1597 do { 1598 next = dcpu->dtdsc_dirty; 1599 dvar->dtdv_next = next; 1600 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1601 1602 /* 1603 * Finally, unlock this hash bucket. 1604 */ 1605 ASSERT(hash[bucket].dtdh_lock == lock); 1606 ASSERT(lock & 1); 1607 hash[bucket].dtdh_lock++; 1608 1609 return (NULL); 1610 next: 1611 prev = dvar; 1612 continue; 1613 } 1614 1615 if (dvar == NULL) { 1616 /* 1617 * If dvar is NULL, it is because we went off the rails: 1618 * one of the elements that we traversed in the hash chain 1619 * was deleted while we were traversing it. In this case, 1620 * we assert that we aren't doing a dealloc (deallocs lock 1621 * the hash bucket to prevent themselves from racing with 1622 * one another), and retry the hash chain traversal. 1623 */ 1624 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1625 goto top; 1626 } 1627 1628 if (op != DTRACE_DYNVAR_ALLOC) { 1629 /* 1630 * If we are not to allocate a new variable, we want to 1631 * return NULL now. Before we return, check that the value 1632 * of the lock word hasn't changed. If it has, we may have 1633 * seen an inconsistent snapshot. 1634 */ 1635 if (op == DTRACE_DYNVAR_NOALLOC) { 1636 if (hash[bucket].dtdh_lock != lock) 1637 goto top; 1638 } else { 1639 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1640 ASSERT(hash[bucket].dtdh_lock == lock); 1641 ASSERT(lock & 1); 1642 hash[bucket].dtdh_lock++; 1643 } 1644 1645 return (NULL); 1646 } 1647 1648 /* 1649 * We need to allocate a new dynamic variable. The size we need is the 1650 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1651 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1652 * the size of any referred-to data (dsize). We then round the final 1653 * size up to the chunksize for allocation. 
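 * (dtrace_dynvar_t already embeds one dtrace_key_t in its tuple, which is
 * why the computation below adds only (nkeys - 1) additional keys.)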
1654 */ 1655 for (ksize = 0, i = 0; i < nkeys; i++) 1656 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1657 1658 /* 1659 * This should be pretty much impossible, but could happen if, say, 1660 * strange DIF specified the tuple. Ideally, this should be an 1661 * assertion and not an error condition -- but that requires that the 1662 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1663 * bullet-proof. (That is, it must not be able to be fooled by 1664 * malicious DIF.) Given the lack of backwards branches in DIF, 1665 * solving this would presumably not amount to solving the Halting 1666 * Problem -- but it still seems awfully hard. 1667 */ 1668 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1669 ksize + dsize > chunksize) { 1670 dcpu->dtdsc_drops++; 1671 return (NULL); 1672 } 1673 1674 nstate = DTRACE_DSTATE_EMPTY; 1675 1676 do { 1677 retry: 1678 free = dcpu->dtdsc_free; 1679 1680 if (free == NULL) { 1681 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1682 void *rval; 1683 1684 if (clean == NULL) { 1685 /* 1686 * We're out of dynamic variable space on 1687 * this CPU. Unless we have tried all CPUs, 1688 * we'll try to allocate from a different 1689 * CPU. 1690 */ 1691 switch (dstate->dtds_state) { 1692 case DTRACE_DSTATE_CLEAN: { 1693 void *sp = &dstate->dtds_state; 1694 1695 if (++cpu >= NCPU) 1696 cpu = 0; 1697 1698 if (dcpu->dtdsc_dirty != NULL && 1699 nstate == DTRACE_DSTATE_EMPTY) 1700 nstate = DTRACE_DSTATE_DIRTY; 1701 1702 if (dcpu->dtdsc_rinsing != NULL) 1703 nstate = DTRACE_DSTATE_RINSING; 1704 1705 dcpu = &dstate->dtds_percpu[cpu]; 1706 1707 if (cpu != me) 1708 goto retry; 1709 1710 (void) dtrace_cas32(sp, 1711 DTRACE_DSTATE_CLEAN, nstate); 1712 1713 /* 1714 * To increment the correct bean 1715 * counter, take another lap. 1716 */ 1717 goto retry; 1718 } 1719 1720 case DTRACE_DSTATE_DIRTY: 1721 dcpu->dtdsc_dirty_drops++; 1722 break; 1723 1724 case DTRACE_DSTATE_RINSING: 1725 dcpu->dtdsc_rinsing_drops++; 1726 break; 1727 1728 case DTRACE_DSTATE_EMPTY: 1729 dcpu->dtdsc_drops++; 1730 break; 1731 } 1732 1733 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1734 return (NULL); 1735 } 1736 1737 /* 1738 * The clean list appears to be non-empty. We want to 1739 * move the clean list to the free list; we start by 1740 * moving the clean pointer aside. 1741 */ 1742 if (dtrace_casptr(&dcpu->dtdsc_clean, 1743 clean, NULL) != clean) { 1744 /* 1745 * We are in one of two situations: 1746 * 1747 * (a) The clean list was switched to the 1748 * free list by another CPU. 1749 * 1750 * (b) The clean list was added to by the 1751 * cleansing cyclic. 1752 * 1753 * In either of these situations, we can 1754 * just reattempt the free list allocation. 1755 */ 1756 goto retry; 1757 } 1758 1759 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1760 1761 /* 1762 * Now we'll move the clean list to the free list. 1763 * It's impossible for this to fail: the only way 1764 * the free list can be updated is through this 1765 * code path, and only one CPU can own the clean list. 1766 * Thus, it would only be possible for this to fail if 1767 * this code were racing with dtrace_dynvar_clean(). 1768 * (That is, if dtrace_dynvar_clean() updated the clean 1769 * list, and we ended up racing to update the free 1770 * list.) This race is prevented by the dtrace_sync() 1771 * in dtrace_dynvar_clean() -- which flushes the 1772 * owners of the clean lists out before resetting 1773 * the clean lists. 
1774 */ 1775 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1776 ASSERT(rval == NULL); 1777 goto retry; 1778 } 1779 1780 dvar = free; 1781 new_free = dvar->dtdv_next; 1782 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1783 1784 /* 1785 * We have now allocated a new chunk. We copy the tuple keys into the 1786 * tuple array and copy any referenced key data into the data space 1787 * following the tuple array. As we do this, we relocate dttk_value 1788 * in the final tuple to point to the key data address in the chunk. 1789 */ 1790 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1791 dvar->dtdv_data = (void *)(kdata + ksize); 1792 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1793 1794 for (i = 0; i < nkeys; i++) { 1795 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1796 size_t kesize = key[i].dttk_size; 1797 1798 if (kesize != 0) { 1799 dtrace_bcopy( 1800 (const void *)(uintptr_t)key[i].dttk_value, 1801 (void *)kdata, kesize); 1802 dkey->dttk_value = kdata; 1803 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1804 } else { 1805 dkey->dttk_value = key[i].dttk_value; 1806 } 1807 1808 dkey->dttk_size = kesize; 1809 } 1810 1811 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1812 dvar->dtdv_hashval = hashval; 1813 dvar->dtdv_next = start; 1814 1815 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1816 return (dvar); 1817 1818 /* 1819 * The cas has failed. Either another CPU is adding an element to 1820 * this hash chain, or another CPU is deleting an element from this 1821 * hash chain. The simplest way to deal with both of these cases 1822 * (though not necessarily the most efficient) is to free our 1823 * allocated block and tail-call ourselves. Note that the free is 1824 * to the dirty list and _not_ to the free list. This is to prevent 1825 * races with allocators, above. 
1826 */ 1827 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1828 1829 dtrace_membar_producer(); 1830 1831 do { 1832 free = dcpu->dtdsc_dirty; 1833 dvar->dtdv_next = free; 1834 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1835 1836 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1837 } 1838 1839 /*ARGSUSED*/ 1840 static void 1841 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1842 { 1843 if ((int64_t)nval < (int64_t)*oval) 1844 *oval = nval; 1845 } 1846 1847 /*ARGSUSED*/ 1848 static void 1849 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1850 { 1851 if ((int64_t)nval > (int64_t)*oval) 1852 *oval = nval; 1853 } 1854 1855 static void 1856 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1857 { 1858 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1859 int64_t val = (int64_t)nval; 1860 1861 if (val < 0) { 1862 for (i = 0; i < zero; i++) { 1863 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1864 quanta[i] += incr; 1865 return; 1866 } 1867 } 1868 } else { 1869 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1870 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1871 quanta[i - 1] += incr; 1872 return; 1873 } 1874 } 1875 1876 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1877 return; 1878 } 1879 1880 ASSERT(0); 1881 } 1882 1883 static void 1884 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1885 { 1886 uint64_t arg = *lquanta++; 1887 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1888 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1889 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1890 int32_t val = (int32_t)nval, level; 1891 1892 ASSERT(step != 0); 1893 ASSERT(levels != 0); 1894 1895 if (val < base) { 1896 /* 1897 * This is an underflow. 1898 */ 1899 lquanta[0] += incr; 1900 return; 1901 } 1902 1903 level = (val - base) / step; 1904 1905 if (level < levels) { 1906 lquanta[level + 1] += incr; 1907 return; 1908 } 1909 1910 /* 1911 * This is an overflow. 1912 */ 1913 lquanta[levels + 1] += incr; 1914 } 1915 1916 static int 1917 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1918 uint16_t high, uint16_t nsteps, int64_t value) 1919 { 1920 int64_t this = 1, last, next; 1921 int base = 1, order; 1922 1923 ASSERT(factor <= nsteps); 1924 ASSERT(nsteps % factor == 0); 1925 1926 for (order = 0; order < low; order++) 1927 this *= factor; 1928 1929 /* 1930 * If our value is less than our factor taken to the power of the 1931 * low order of magnitude, it goes into the zeroth bucket. 1932 */ 1933 if (value < (last = this)) 1934 return (0); 1935 1936 for (this *= factor; order <= high; order++) { 1937 int nbuckets = this > nsteps ? nsteps : this; 1938 1939 if ((next = this * factor) < this) { 1940 /* 1941 * We should not generally get log/linear quantizations 1942 * with a high magnitude that allows 64-bits to 1943 * overflow, but we nonetheless protect against this 1944 * by explicitly checking for overflow, and clamping 1945 * our value accordingly. 1946 */ 1947 value = this - 1; 1948 } 1949 1950 if (value < this) { 1951 /* 1952 * If our value lies within this order of magnitude, 1953 * determine its position by taking the offset within 1954 * the order of magnitude, dividing by the bucket 1955 * width, and adding to our (accumulated) base. 
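 *
 * An illustrative walk-through: with factor 10, low 0, high 2, nsteps 10
 * and value 42, the zeroth bucket covers values below 10^0; the first
 * order of magnitude [1, 10) contributes 9 buckets (base becomes 10);
 * and 42 falls in the second order [10, 100), whose bucket width is
 * 100 / 10, so we return 10 + (42 - 10) / 10 = 13 -- the bucket
 * covering [40, 50).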
1956 */ 1957 return (base + (value - last) / (this / nbuckets)); 1958 } 1959 1960 base += nbuckets - (nbuckets / factor); 1961 last = this; 1962 this = next; 1963 } 1964 1965 /* 1966 * Our value is greater than or equal to our factor taken to the 1967 * power of one plus the high magnitude -- return the top bucket. 1968 */ 1969 return (base); 1970 } 1971 1972 static void 1973 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1974 { 1975 uint64_t arg = *llquanta++; 1976 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1977 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1978 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1979 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1980 1981 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1982 low, high, nsteps, nval)] += incr; 1983 } 1984 1985 /*ARGSUSED*/ 1986 static void 1987 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1988 { 1989 data[0]++; 1990 data[1] += nval; 1991 } 1992 1993 /*ARGSUSED*/ 1994 static void 1995 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1996 { 1997 int64_t snval = (int64_t)nval; 1998 uint64_t tmp[2]; 1999 2000 data[0]++; 2001 data[1] += nval; 2002 2003 /* 2004 * What we want to say here is: 2005 * 2006 * data[2] += nval * nval; 2007 * 2008 * But given that nval is 64-bit, we could easily overflow, so 2009 * we do this as 128-bit arithmetic. 2010 */ 2011 if (snval < 0) 2012 snval = -snval; 2013 2014 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2015 dtrace_add_128(data + 2, tmp, data + 2); 2016 } 2017 2018 /*ARGSUSED*/ 2019 static void 2020 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2021 { 2022 *oval = *oval + 1; 2023 } 2024 2025 /*ARGSUSED*/ 2026 static void 2027 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2028 { 2029 *oval += nval; 2030 } 2031 2032 /* 2033 * Aggregate given the tuple in the principal data buffer, and the aggregating 2034 * action denoted by the specified dtrace_aggregation_t. The aggregation 2035 * buffer is specified as the buf parameter. This routine does not return 2036 * failure; if there is no space in the aggregation buffer, the data will be 2037 * dropped, and a corresponding counter incremented. 2038 */ 2039 static void 2040 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2041 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2042 { 2043 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2044 uint32_t i, ndx, size, fsize; 2045 uint32_t align = sizeof (uint64_t) - 1; 2046 dtrace_aggbuffer_t *agb; 2047 dtrace_aggkey_t *key; 2048 uint32_t hashval = 0, limit, isstr; 2049 caddr_t tomax, data, kdata; 2050 dtrace_actkind_t action; 2051 dtrace_action_t *act; 2052 uintptr_t offs; 2053 2054 if (buf == NULL) 2055 return; 2056 2057 if (!agg->dtag_hasarg) { 2058 /* 2059 * Currently, only quantize() and lquantize() take additional 2060 * arguments, and they have the same semantics: an increment 2061 * value that defaults to 1 when not present. If additional 2062 * aggregating actions take arguments, the setting of the 2063 * default argument value will presumably have to become more 2064 * sophisticated... 
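 *
 * In D terms, a hypothetical clause such as
 *
 *	syscall::write:entry { @bytes[execname] = quantize(arg2, 2); }
 *
 * supplies its own increment (arg == 2) and skips this default, while
 * the more common single-argument form lands here and has arg set to
 * 1 below.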
2065 */ 2066 arg = 1; 2067 } 2068 2069 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2070 size = rec->dtrd_offset - agg->dtag_base; 2071 fsize = size + rec->dtrd_size; 2072 2073 ASSERT(dbuf->dtb_tomax != NULL); 2074 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2075 2076 if ((tomax = buf->dtb_tomax) == NULL) { 2077 dtrace_buffer_drop(buf); 2078 return; 2079 } 2080 2081 /* 2082 * The metastructure is always at the bottom of the buffer. 2083 */ 2084 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2085 sizeof (dtrace_aggbuffer_t)); 2086 2087 if (buf->dtb_offset == 0) { 2088 /* 2089 * We just kludge up approximately 1/8th of the size to be 2090 * buckets. If this guess ends up being routinely 2091 * off-the-mark, we may need to dynamically readjust this 2092 * based on past performance. 2093 */ 2094 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2095 2096 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2097 (uintptr_t)tomax || hashsize == 0) { 2098 /* 2099 * We've been given a ludicrously small buffer; 2100 * increment our drop count and leave. 2101 */ 2102 dtrace_buffer_drop(buf); 2103 return; 2104 } 2105 2106 /* 2107 * And now, a pathetic attempt to try to get a an odd (or 2108 * perchance, a prime) hash size for better hash distribution. 2109 */ 2110 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2111 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2112 2113 agb->dtagb_hashsize = hashsize; 2114 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2115 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2116 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2117 2118 for (i = 0; i < agb->dtagb_hashsize; i++) 2119 agb->dtagb_hash[i] = NULL; 2120 } 2121 2122 ASSERT(agg->dtag_first != NULL); 2123 ASSERT(agg->dtag_first->dta_intuple); 2124 2125 /* 2126 * Calculate the hash value based on the key. Note that we _don't_ 2127 * include the aggid in the hashing (but we will store it as part of 2128 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2129 * algorithm: a simple, quick algorithm that has no known funnels, and 2130 * gets good distribution in practice. The efficacy of the hashing 2131 * algorithm (and a comparison with other algorithms) may be found by 2132 * running the ::dtrace_aggstat MDB dcmd. 2133 */ 2134 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2135 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2136 limit = i + act->dta_rec.dtrd_size; 2137 ASSERT(limit <= size); 2138 isstr = DTRACEACT_ISSTRING(act); 2139 2140 for (; i < limit; i++) { 2141 hashval += data[i]; 2142 hashval += (hashval << 10); 2143 hashval ^= (hashval >> 6); 2144 2145 if (isstr && data[i] == '\0') 2146 break; 2147 } 2148 } 2149 2150 hashval += (hashval << 3); 2151 hashval ^= (hashval >> 11); 2152 hashval += (hashval << 15); 2153 2154 /* 2155 * Yes, the divide here is expensive -- but it's generally the least 2156 * of the performance issues given the amount of data that we iterate 2157 * over to compute hash values, compare data, etc. 
2158 */ 2159 ndx = hashval % agb->dtagb_hashsize; 2160 2161 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2162 ASSERT((caddr_t)key >= tomax); 2163 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2164 2165 if (hashval != key->dtak_hashval || key->dtak_size != size) 2166 continue; 2167 2168 kdata = key->dtak_data; 2169 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2170 2171 for (act = agg->dtag_first; act->dta_intuple; 2172 act = act->dta_next) { 2173 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2174 limit = i + act->dta_rec.dtrd_size; 2175 ASSERT(limit <= size); 2176 isstr = DTRACEACT_ISSTRING(act); 2177 2178 for (; i < limit; i++) { 2179 if (kdata[i] != data[i]) 2180 goto next; 2181 2182 if (isstr && data[i] == '\0') 2183 break; 2184 } 2185 } 2186 2187 if (action != key->dtak_action) { 2188 /* 2189 * We are aggregating on the same value in the same 2190 * aggregation with two different aggregating actions. 2191 * (This should have been picked up in the compiler, 2192 * so we may be dealing with errant or devious DIF.) 2193 * This is an error condition; we indicate as much, 2194 * and return. 2195 */ 2196 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2197 return; 2198 } 2199 2200 /* 2201 * This is a hit: we need to apply the aggregator to 2202 * the value at this key. 2203 */ 2204 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2205 return; 2206 next: 2207 continue; 2208 } 2209 2210 /* 2211 * We didn't find it. We need to allocate some zero-filled space, 2212 * link it into the hash table appropriately, and apply the aggregator 2213 * to the (zero-filled) value. 2214 */ 2215 offs = buf->dtb_offset; 2216 while (offs & (align - 1)) 2217 offs += sizeof (uint32_t); 2218 2219 /* 2220 * If we don't have enough room to both allocate a new key _and_ 2221 * its associated data, increment the drop count and return. 2222 */ 2223 if ((uintptr_t)tomax + offs + fsize > 2224 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2225 dtrace_buffer_drop(buf); 2226 return; 2227 } 2228 2229 /*CONSTCOND*/ 2230 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2231 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2232 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2233 2234 key->dtak_data = kdata = tomax + offs; 2235 buf->dtb_offset = offs + fsize; 2236 2237 /* 2238 * Now copy the data across. 2239 */ 2240 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2241 2242 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2243 kdata[i] = data[i]; 2244 2245 /* 2246 * Because strings are not zeroed out by default, we need to iterate 2247 * looking for actions that store strings, and we need to explicitly 2248 * pad these strings out with zeroes. 2249 */ 2250 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2251 int nul; 2252 2253 if (!DTRACEACT_ISSTRING(act)) 2254 continue; 2255 2256 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2257 limit = i + act->dta_rec.dtrd_size; 2258 ASSERT(limit <= size); 2259 2260 for (nul = 0; i < limit; i++) { 2261 if (nul) { 2262 kdata[i] = '\0'; 2263 continue; 2264 } 2265 2266 if (data[i] != '\0') 2267 continue; 2268 2269 nul = 1; 2270 } 2271 } 2272 2273 for (i = size; i < fsize; i++) 2274 kdata[i] = 0; 2275 2276 key->dtak_hashval = hashval; 2277 key->dtak_size = size; 2278 key->dtak_action = action; 2279 key->dtak_next = agb->dtagb_hash[ndx]; 2280 agb->dtagb_hash[ndx] = key; 2281 2282 /* 2283 * Finally, apply the aggregator. 
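 *
 * (The initial value is chosen per-aggregator when the ECB is created;
 * for example, a min() aggregation starts from INT64_MAX so that the
 * first value recorded into a fresh key always replaces it, while
 * count() and sum() simply start from 0.)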
2284 */ 2285 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2286 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2287 } 2288 2289 /* 2290 * Given consumer state, this routine finds a speculation in the INACTIVE 2291 * state and transitions it into the ACTIVE state. If there is no speculation 2292 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2293 * incremented -- it is up to the caller to take appropriate action. 2294 */ 2295 static int 2296 dtrace_speculation(dtrace_state_t *state) 2297 { 2298 int i = 0; 2299 dtrace_speculation_state_t current; 2300 uint32_t *stat = &state->dts_speculations_unavail, count; 2301 2302 while (i < state->dts_nspeculations) { 2303 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2304 2305 current = spec->dtsp_state; 2306 2307 if (current != DTRACESPEC_INACTIVE) { 2308 if (current == DTRACESPEC_COMMITTINGMANY || 2309 current == DTRACESPEC_COMMITTING || 2310 current == DTRACESPEC_DISCARDING) 2311 stat = &state->dts_speculations_busy; 2312 i++; 2313 continue; 2314 } 2315 2316 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2317 current, DTRACESPEC_ACTIVE) == current) 2318 return (i + 1); 2319 } 2320 2321 /* 2322 * We couldn't find a speculation. If we found as much as a single 2323 * busy speculation buffer, we'll attribute this failure as "busy" 2324 * instead of "unavail". 2325 */ 2326 do { 2327 count = *stat; 2328 } while (dtrace_cas32(stat, count, count + 1) != count); 2329 2330 return (0); 2331 } 2332 2333 /* 2334 * This routine commits an active speculation. If the specified speculation 2335 * is not in a valid state to perform a commit(), this routine will silently do 2336 * nothing. The state of the specified speculation is transitioned according 2337 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2338 */ 2339 static void 2340 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2341 dtrace_specid_t which) 2342 { 2343 dtrace_speculation_t *spec; 2344 dtrace_buffer_t *src, *dest; 2345 uintptr_t daddr, saddr, dlimit; 2346 dtrace_speculation_state_t current, new = 0; 2347 intptr_t offs; 2348 2349 if (which == 0) 2350 return; 2351 2352 if (which > state->dts_nspeculations) { 2353 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2354 return; 2355 } 2356 2357 spec = &state->dts_speculations[which - 1]; 2358 src = &spec->dtsp_buffer[cpu]; 2359 dest = &state->dts_buffer[cpu]; 2360 2361 do { 2362 current = spec->dtsp_state; 2363 2364 if (current == DTRACESPEC_COMMITTINGMANY) 2365 break; 2366 2367 switch (current) { 2368 case DTRACESPEC_INACTIVE: 2369 case DTRACESPEC_DISCARDING: 2370 return; 2371 2372 case DTRACESPEC_COMMITTING: 2373 /* 2374 * This is only possible if we are (a) commit()'ing 2375 * without having done a prior speculate() on this CPU 2376 * and (b) racing with another commit() on a different 2377 * CPU. There's nothing to do -- we just assert that 2378 * our offset is 0. 2379 */ 2380 ASSERT(src->dtb_offset == 0); 2381 return; 2382 2383 case DTRACESPEC_ACTIVE: 2384 new = DTRACESPEC_COMMITTING; 2385 break; 2386 2387 case DTRACESPEC_ACTIVEONE: 2388 /* 2389 * This speculation is active on one CPU. If our 2390 * buffer offset is non-zero, we know that the one CPU 2391 * must be us. Otherwise, we are committing on a 2392 * different CPU from the speculate(), and we must 2393 * rely on being asynchronously cleaned. 
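 *
 * As an illustration: if the speculate() ran on CPU 3 and filled that
 * CPU's speculative buffer, but the commit() fires on CPU 5, then on
 * CPU 5 the buffer offset is zero -- so we fall through to
 * COMMITTINGMANY and leave it to the asynchronous cleaner to copy
 * CPU 3's data into the principal buffer.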
2394 */ 2395 if (src->dtb_offset != 0) { 2396 new = DTRACESPEC_COMMITTING; 2397 break; 2398 } 2399 /*FALLTHROUGH*/ 2400 2401 case DTRACESPEC_ACTIVEMANY: 2402 new = DTRACESPEC_COMMITTINGMANY; 2403 break; 2404 2405 default: 2406 ASSERT(0); 2407 } 2408 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2409 current, new) != current); 2410 2411 /* 2412 * We have set the state to indicate that we are committing this 2413 * speculation. Now reserve the necessary space in the destination 2414 * buffer. 2415 */ 2416 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2417 sizeof (uint64_t), state, NULL)) < 0) { 2418 dtrace_buffer_drop(dest); 2419 goto out; 2420 } 2421 2422 /* 2423 * We have the space; copy the buffer across. (Note that this is a 2424 * highly suboptimal bcopy(); in the unlikely event that this becomes 2425 * a serious performance issue, a high-performance DTrace-specific 2426 * bcopy() should obviously be invented.) 2427 */ 2428 daddr = (uintptr_t)dest->dtb_tomax + offs; 2429 dlimit = daddr + src->dtb_offset; 2430 saddr = (uintptr_t)src->dtb_tomax; 2431 2432 /* 2433 * First, the aligned portion. 2434 */ 2435 while (dlimit - daddr >= sizeof (uint64_t)) { 2436 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2437 2438 daddr += sizeof (uint64_t); 2439 saddr += sizeof (uint64_t); 2440 } 2441 2442 /* 2443 * Now any left-over bit... 2444 */ 2445 while (dlimit - daddr) 2446 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2447 2448 /* 2449 * Finally, commit the reserved space in the destination buffer. 2450 */ 2451 dest->dtb_offset = offs + src->dtb_offset; 2452 2453 out: 2454 /* 2455 * If we're lucky enough to be the only active CPU on this speculation 2456 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2457 */ 2458 if (current == DTRACESPEC_ACTIVE || 2459 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2460 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2461 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2462 2463 ASSERT(rval == DTRACESPEC_COMMITTING); 2464 } 2465 2466 src->dtb_offset = 0; 2467 src->dtb_xamot_drops += src->dtb_drops; 2468 src->dtb_drops = 0; 2469 } 2470 2471 /* 2472 * This routine discards an active speculation. If the specified speculation 2473 * is not in a valid state to perform a discard(), this routine will silently 2474 * do nothing.
The state of the specified speculation is transitioned 2475 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2476 */ 2477 static void 2478 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2479 dtrace_specid_t which) 2480 { 2481 dtrace_speculation_t *spec; 2482 dtrace_speculation_state_t current, new = 0; 2483 dtrace_buffer_t *buf; 2484 2485 if (which == 0) 2486 return; 2487 2488 if (which > state->dts_nspeculations) { 2489 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2490 return; 2491 } 2492 2493 spec = &state->dts_speculations[which - 1]; 2494 buf = &spec->dtsp_buffer[cpu]; 2495 2496 do { 2497 current = spec->dtsp_state; 2498 2499 switch (current) { 2500 case DTRACESPEC_INACTIVE: 2501 case DTRACESPEC_COMMITTINGMANY: 2502 case DTRACESPEC_COMMITTING: 2503 case DTRACESPEC_DISCARDING: 2504 return; 2505 2506 case DTRACESPEC_ACTIVE: 2507 case DTRACESPEC_ACTIVEMANY: 2508 new = DTRACESPEC_DISCARDING; 2509 break; 2510 2511 case DTRACESPEC_ACTIVEONE: 2512 if (buf->dtb_offset != 0) { 2513 new = DTRACESPEC_INACTIVE; 2514 } else { 2515 new = DTRACESPEC_DISCARDING; 2516 } 2517 break; 2518 2519 default: 2520 ASSERT(0); 2521 } 2522 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2523 current, new) != current); 2524 2525 buf->dtb_offset = 0; 2526 buf->dtb_drops = 0; 2527 } 2528 2529 /* 2530 * Note: not called from probe context. This function is called 2531 * asynchronously from cross call context to clean any speculations that are 2532 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2533 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2534 * speculation. 2535 */ 2536 static void 2537 dtrace_speculation_clean_here(dtrace_state_t *state) 2538 { 2539 dtrace_icookie_t cookie; 2540 processorid_t cpu = curcpu; 2541 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2542 dtrace_specid_t i; 2543 2544 cookie = dtrace_interrupt_disable(); 2545 2546 if (dest->dtb_tomax == NULL) { 2547 dtrace_interrupt_enable(cookie); 2548 return; 2549 } 2550 2551 for (i = 0; i < state->dts_nspeculations; i++) { 2552 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2553 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2554 2555 if (src->dtb_tomax == NULL) 2556 continue; 2557 2558 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2559 src->dtb_offset = 0; 2560 continue; 2561 } 2562 2563 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2564 continue; 2565 2566 if (src->dtb_offset == 0) 2567 continue; 2568 2569 dtrace_speculation_commit(state, cpu, i + 1); 2570 } 2571 2572 dtrace_interrupt_enable(cookie); 2573 } 2574 2575 /* 2576 * Note: not called from probe context. This function is called 2577 * asynchronously (and at a regular interval) to clean any speculations that 2578 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2579 * is work to be done, it cross calls all CPUs to perform that work; 2580 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2581 * INACTIVE state until they have been cleaned by all CPUs. 
2582 */ 2583 static void 2584 dtrace_speculation_clean(dtrace_state_t *state) 2585 { 2586 int work = 0, rv; 2587 dtrace_specid_t i; 2588 2589 for (i = 0; i < state->dts_nspeculations; i++) { 2590 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2591 2592 ASSERT(!spec->dtsp_cleaning); 2593 2594 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2595 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2596 continue; 2597 2598 work++; 2599 spec->dtsp_cleaning = 1; 2600 } 2601 2602 if (!work) 2603 return; 2604 2605 dtrace_xcall(DTRACE_CPUALL, 2606 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2607 2608 /* 2609 * We now know that all CPUs have committed or discarded their 2610 * speculation buffers, as appropriate. We can now set the state 2611 * to inactive. 2612 */ 2613 for (i = 0; i < state->dts_nspeculations; i++) { 2614 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2615 dtrace_speculation_state_t current, new; 2616 2617 if (!spec->dtsp_cleaning) 2618 continue; 2619 2620 current = spec->dtsp_state; 2621 ASSERT(current == DTRACESPEC_DISCARDING || 2622 current == DTRACESPEC_COMMITTINGMANY); 2623 2624 new = DTRACESPEC_INACTIVE; 2625 2626 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2627 ASSERT(rv == current); 2628 spec->dtsp_cleaning = 0; 2629 } 2630 } 2631 2632 /* 2633 * Called as part of a speculate() to get the speculative buffer associated 2634 * with a given speculation. Returns NULL if the specified speculation is not 2635 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2636 * the active CPU is not the specified CPU -- the speculation will be 2637 * atomically transitioned into the ACTIVEMANY state. 2638 */ 2639 static dtrace_buffer_t * 2640 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2641 dtrace_specid_t which) 2642 { 2643 dtrace_speculation_t *spec; 2644 dtrace_speculation_state_t current, new = 0; 2645 dtrace_buffer_t *buf; 2646 2647 if (which == 0) 2648 return (NULL); 2649 2650 if (which > state->dts_nspeculations) { 2651 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2652 return (NULL); 2653 } 2654 2655 spec = &state->dts_speculations[which - 1]; 2656 buf = &spec->dtsp_buffer[cpuid]; 2657 2658 do { 2659 current = spec->dtsp_state; 2660 2661 switch (current) { 2662 case DTRACESPEC_INACTIVE: 2663 case DTRACESPEC_COMMITTINGMANY: 2664 case DTRACESPEC_DISCARDING: 2665 return (NULL); 2666 2667 case DTRACESPEC_COMMITTING: 2668 ASSERT(buf->dtb_offset == 0); 2669 return (NULL); 2670 2671 case DTRACESPEC_ACTIVEONE: 2672 /* 2673 * This speculation is currently active on one CPU. 2674 * Check the offset in the buffer; if it's non-zero, 2675 * that CPU must be us (and we leave the state alone). 2676 * If it's zero, assume that we're starting on a new 2677 * CPU -- and change the state to indicate that the 2678 * speculation is active on more than one CPU. 2679 */ 2680 if (buf->dtb_offset != 0) 2681 return (buf); 2682 2683 new = DTRACESPEC_ACTIVEMANY; 2684 break; 2685 2686 case DTRACESPEC_ACTIVEMANY: 2687 return (buf); 2688 2689 case DTRACESPEC_ACTIVE: 2690 new = DTRACESPEC_ACTIVEONE; 2691 break; 2692 2693 default: 2694 ASSERT(0); 2695 } 2696 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2697 current, new) != current); 2698 2699 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2700 return (buf); 2701 } 2702 2703 /* 2704 * Return a string. 
In the event that the user lacks the privilege to access 2705 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2706 * don't fail access checking. 2707 * 2708 * dtrace_dif_variable() uses this routine as a helper for various 2709 * builtin values such as 'execname' and 'probefunc.' 2710 */ 2711 uintptr_t 2712 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2713 dtrace_mstate_t *mstate) 2714 { 2715 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2716 uintptr_t ret; 2717 size_t strsz; 2718 2719 /* 2720 * The easy case: this probe is allowed to read all of memory, so 2721 * we can just return this as a vanilla pointer. 2722 */ 2723 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2724 return (addr); 2725 2726 /* 2727 * This is the tougher case: we copy the string in question from 2728 * kernel memory into scratch memory and return it that way: this 2729 * ensures that we won't trip up when access checking tests the 2730 * BYREF return value. 2731 */ 2732 strsz = dtrace_strlen((char *)addr, size) + 1; 2733 2734 if (mstate->dtms_scratch_ptr + strsz > 2735 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2736 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2737 return (0); 2738 } 2739 2740 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2741 strsz); 2742 ret = mstate->dtms_scratch_ptr; 2743 mstate->dtms_scratch_ptr += strsz; 2744 return (ret); 2745 } 2746 2747 /* 2748 * Return a string from a memory address which is known to have one or 2749 * more concatenated, individually zero-terminated sub-strings. 2750 * In the event that the user lacks the privilege to access 2751 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2752 * don't fail access checking. 2753 * 2754 * dtrace_dif_variable() uses this routine as a helper for various 2755 * builtin values such as 'execargs'. 2756 */ 2757 static uintptr_t 2758 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2759 dtrace_mstate_t *mstate) 2760 { 2761 char *p; 2762 size_t i; 2763 uintptr_t ret; 2764 2765 if (mstate->dtms_scratch_ptr + strsz > 2766 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2767 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2768 return (0); 2769 } 2770 2771 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2772 strsz); 2773 2774 /* Replace sub-string termination characters with a space. */ 2775 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2776 p++, i++) 2777 if (*p == '\0') 2778 *p = ' '; 2779 2780 ret = mstate->dtms_scratch_ptr; 2781 mstate->dtms_scratch_ptr += strsz; 2782 return (ret); 2783 } 2784 2785 /* 2786 * This function implements the DIF emulator's variable lookups. The emulator 2787 * passes a reserved variable identifier and optional built-in array index. 2788 */ 2789 static uint64_t 2790 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2791 uint64_t ndx) 2792 { 2793 /* 2794 * If we're accessing one of the uncached arguments, we'll turn this 2795 * into a reference in the args array.
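 *
 * For example, a D reference to the built-in arg5 arrives as the
 * identifier DIF_VAR_ARG0 + 5; it is rewritten below into an args[]
 * lookup (DIF_VAR_ARGS with ndx == 5), which is then satisfied either
 * from the cached dtms_arg[] values or, for the uncached arguments, by
 * asking the provider's dtps_getargval() entry point or dtrace_getarg().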
2796 */ 2797 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2798 ndx = v - DIF_VAR_ARG0; 2799 v = DIF_VAR_ARGS; 2800 } 2801 2802 switch (v) { 2803 case DIF_VAR_ARGS: 2804 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2805 if (ndx >= sizeof (mstate->dtms_arg) / 2806 sizeof (mstate->dtms_arg[0])) { 2807 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2808 dtrace_provider_t *pv; 2809 uint64_t val; 2810 2811 pv = mstate->dtms_probe->dtpr_provider; 2812 if (pv->dtpv_pops.dtps_getargval != NULL) 2813 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2814 mstate->dtms_probe->dtpr_id, 2815 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2816 else 2817 val = dtrace_getarg(ndx, aframes); 2818 2819 /* 2820 * This is regrettably required to keep the compiler 2821 * from tail-optimizing the call to dtrace_getarg(). 2822 * The condition always evaluates to true, but the 2823 * compiler has no way of figuring that out a priori. 2824 * (None of this would be necessary if the compiler 2825 * could be relied upon to _always_ tail-optimize 2826 * the call to dtrace_getarg() -- but it can't.) 2827 */ 2828 if (mstate->dtms_probe != NULL) 2829 return (val); 2830 2831 ASSERT(0); 2832 } 2833 2834 return (mstate->dtms_arg[ndx]); 2835 2836 #if defined(sun) 2837 case DIF_VAR_UREGS: { 2838 klwp_t *lwp; 2839 2840 if (!dtrace_priv_proc(state)) 2841 return (0); 2842 2843 if ((lwp = curthread->t_lwp) == NULL) { 2844 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2845 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2846 return (0); 2847 } 2848 2849 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2850 return (0); 2851 } 2852 #else 2853 case DIF_VAR_UREGS: { 2854 struct trapframe *tframe; 2855 2856 if (!dtrace_priv_proc(state)) 2857 return (0); 2858 2859 if ((tframe = curthread->td_frame) == NULL) { 2860 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2861 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2862 return (0); 2863 } 2864 2865 return (dtrace_getreg(tframe, ndx)); 2866 } 2867 #endif 2868 2869 case DIF_VAR_CURTHREAD: 2870 if (!dtrace_priv_kernel(state)) 2871 return (0); 2872 return ((uint64_t)(uintptr_t)curthread); 2873 2874 case DIF_VAR_TIMESTAMP: 2875 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2876 mstate->dtms_timestamp = dtrace_gethrtime(); 2877 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2878 } 2879 return (mstate->dtms_timestamp); 2880 2881 case DIF_VAR_VTIMESTAMP: 2882 ASSERT(dtrace_vtime_references != 0); 2883 return (curthread->t_dtrace_vtime); 2884 2885 case DIF_VAR_WALLTIMESTAMP: 2886 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2887 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2888 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2889 } 2890 return (mstate->dtms_walltimestamp); 2891 2892 #if defined(sun) 2893 case DIF_VAR_IPL: 2894 if (!dtrace_priv_kernel(state)) 2895 return (0); 2896 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2897 mstate->dtms_ipl = dtrace_getipl(); 2898 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2899 } 2900 return (mstate->dtms_ipl); 2901 #endif 2902 2903 case DIF_VAR_EPID: 2904 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2905 return (mstate->dtms_epid); 2906 2907 case DIF_VAR_ID: 2908 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2909 return (mstate->dtms_probe->dtpr_id); 2910 2911 case DIF_VAR_STACKDEPTH: 2912 if (!dtrace_priv_kernel(state)) 2913 return (0); 2914 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2915 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2916 2917 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 
2918 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2919 } 2920 return (mstate->dtms_stackdepth); 2921 2922 case DIF_VAR_USTACKDEPTH: 2923 if (!dtrace_priv_proc(state)) 2924 return (0); 2925 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2926 /* 2927 * See comment in DIF_VAR_PID. 2928 */ 2929 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2930 CPU_ON_INTR(CPU)) { 2931 mstate->dtms_ustackdepth = 0; 2932 } else { 2933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2934 mstate->dtms_ustackdepth = 2935 dtrace_getustackdepth(); 2936 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2937 } 2938 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2939 } 2940 return (mstate->dtms_ustackdepth); 2941 2942 case DIF_VAR_CALLER: 2943 if (!dtrace_priv_kernel(state)) 2944 return (0); 2945 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2946 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2947 2948 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2949 /* 2950 * If this is an unanchored probe, we are 2951 * required to go through the slow path: 2952 * dtrace_caller() only guarantees correct 2953 * results for anchored probes. 2954 */ 2955 pc_t caller[2] = {0, 0}; 2956 2957 dtrace_getpcstack(caller, 2, aframes, 2958 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2959 mstate->dtms_caller = caller[1]; 2960 } else if ((mstate->dtms_caller = 2961 dtrace_caller(aframes)) == -1) { 2962 /* 2963 * We have failed to do this the quick way; 2964 * we must resort to the slower approach of 2965 * calling dtrace_getpcstack(). 2966 */ 2967 pc_t caller = 0; 2968 2969 dtrace_getpcstack(&caller, 1, aframes, NULL); 2970 mstate->dtms_caller = caller; 2971 } 2972 2973 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2974 } 2975 return (mstate->dtms_caller); 2976 2977 case DIF_VAR_UCALLER: 2978 if (!dtrace_priv_proc(state)) 2979 return (0); 2980 2981 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2982 uint64_t ustack[3]; 2983 2984 /* 2985 * dtrace_getupcstack() fills in the first uint64_t 2986 * with the current PID. The second uint64_t will 2987 * be the program counter at user-level. The third 2988 * uint64_t will contain the caller, which is what 2989 * we're after. 2990 */ 2991 ustack[2] = 0; 2992 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2993 dtrace_getupcstack(ustack, 3); 2994 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2995 mstate->dtms_ucaller = ustack[2]; 2996 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2997 } 2998 2999 return (mstate->dtms_ucaller); 3000 3001 case DIF_VAR_PROBEPROV: 3002 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3003 return (dtrace_dif_varstr( 3004 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3005 state, mstate)); 3006 3007 case DIF_VAR_PROBEMOD: 3008 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3009 return (dtrace_dif_varstr( 3010 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3011 state, mstate)); 3012 3013 case DIF_VAR_PROBEFUNC: 3014 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3015 return (dtrace_dif_varstr( 3016 (uintptr_t)mstate->dtms_probe->dtpr_func, 3017 state, mstate)); 3018 3019 case DIF_VAR_PROBENAME: 3020 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3021 return (dtrace_dif_varstr( 3022 (uintptr_t)mstate->dtms_probe->dtpr_name, 3023 state, mstate)); 3024 3025 case DIF_VAR_PID: 3026 if (!dtrace_priv_proc(state)) 3027 return (0); 3028 3029 #if defined(sun) 3030 /* 3031 * Note that we are assuming that an unanchored probe is 3032 * always due to a high-level interrupt. (And we're assuming 3033 * that there is only a single high level interrupt.) 
3034 */ 3035 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3036 return (pid0.pid_id); 3037 3038 /* 3039 * It is always safe to dereference one's own t_procp pointer: 3040 * it always points to a valid, allocated proc structure. 3041 * Further, it is always safe to dereference the p_pidp member 3042 * of one's own proc structure. (These are truisms becuase 3043 * threads and processes don't clean up their own state -- 3044 * they leave that task to whomever reaps them.) 3045 */ 3046 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3047 #else 3048 return ((uint64_t)curproc->p_pid); 3049 #endif 3050 3051 case DIF_VAR_PPID: 3052 if (!dtrace_priv_proc(state)) 3053 return (0); 3054 3055 #if defined(sun) 3056 /* 3057 * See comment in DIF_VAR_PID. 3058 */ 3059 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3060 return (pid0.pid_id); 3061 3062 /* 3063 * It is always safe to dereference one's own t_procp pointer: 3064 * it always points to a valid, allocated proc structure. 3065 * (This is true because threads don't clean up their own 3066 * state -- they leave that task to whomever reaps them.) 3067 */ 3068 return ((uint64_t)curthread->t_procp->p_ppid); 3069 #else 3070 return ((uint64_t)curproc->p_pptr->p_pid); 3071 #endif 3072 3073 case DIF_VAR_TID: 3074 #if defined(sun) 3075 /* 3076 * See comment in DIF_VAR_PID. 3077 */ 3078 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3079 return (0); 3080 #endif 3081 3082 return ((uint64_t)curthread->t_tid); 3083 3084 case DIF_VAR_EXECARGS: { 3085 struct pargs *p_args = curthread->td_proc->p_args; 3086 3087 if (p_args == NULL) 3088 return(0); 3089 3090 return (dtrace_dif_varstrz( 3091 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3092 } 3093 3094 case DIF_VAR_EXECNAME: 3095 #if defined(sun) 3096 if (!dtrace_priv_proc(state)) 3097 return (0); 3098 3099 /* 3100 * See comment in DIF_VAR_PID. 3101 */ 3102 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3103 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3104 3105 /* 3106 * It is always safe to dereference one's own t_procp pointer: 3107 * it always points to a valid, allocated proc structure. 3108 * (This is true because threads don't clean up their own 3109 * state -- they leave that task to whomever reaps them.) 3110 */ 3111 return (dtrace_dif_varstr( 3112 (uintptr_t)curthread->t_procp->p_user.u_comm, 3113 state, mstate)); 3114 #else 3115 return (dtrace_dif_varstr( 3116 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3117 #endif 3118 3119 case DIF_VAR_ZONENAME: 3120 #if defined(sun) 3121 if (!dtrace_priv_proc(state)) 3122 return (0); 3123 3124 /* 3125 * See comment in DIF_VAR_PID. 3126 */ 3127 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3128 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3129 3130 /* 3131 * It is always safe to dereference one's own t_procp pointer: 3132 * it always points to a valid, allocated proc structure. 3133 * (This is true because threads don't clean up their own 3134 * state -- they leave that task to whomever reaps them.) 3135 */ 3136 return (dtrace_dif_varstr( 3137 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3138 state, mstate)); 3139 #else 3140 return (0); 3141 #endif 3142 3143 case DIF_VAR_UID: 3144 if (!dtrace_priv_proc(state)) 3145 return (0); 3146 3147 #if defined(sun) 3148 /* 3149 * See comment in DIF_VAR_PID. 
3150 */ 3151 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3152 return ((uint64_t)p0.p_cred->cr_uid); 3153 #endif 3154 3155 /* 3156 * It is always safe to dereference one's own t_procp pointer: 3157 * it always points to a valid, allocated proc structure. 3158 * (This is true because threads don't clean up their own 3159 * state -- they leave that task to whomever reaps them.) 3160 * 3161 * Additionally, it is safe to dereference one's own process 3162 * credential, since this is never NULL after process birth. 3163 */ 3164 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3165 3166 case DIF_VAR_GID: 3167 if (!dtrace_priv_proc(state)) 3168 return (0); 3169 3170 #if defined(sun) 3171 /* 3172 * See comment in DIF_VAR_PID. 3173 */ 3174 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3175 return ((uint64_t)p0.p_cred->cr_gid); 3176 #endif 3177 3178 /* 3179 * It is always safe to dereference one's own t_procp pointer: 3180 * it always points to a valid, allocated proc structure. 3181 * (This is true because threads don't clean up their own 3182 * state -- they leave that task to whomever reaps them.) 3183 * 3184 * Additionally, it is safe to dereference one's own process 3185 * credential, since this is never NULL after process birth. 3186 */ 3187 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3188 3189 case DIF_VAR_ERRNO: { 3190 #if defined(sun) 3191 klwp_t *lwp; 3192 if (!dtrace_priv_proc(state)) 3193 return (0); 3194 3195 /* 3196 * See comment in DIF_VAR_PID. 3197 */ 3198 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3199 return (0); 3200 3201 /* 3202 * It is always safe to dereference one's own t_lwp pointer in 3203 * the event that this pointer is non-NULL. (This is true 3204 * because threads and lwps don't clean up their own state -- 3205 * they leave that task to whomever reaps them.) 3206 */ 3207 if ((lwp = curthread->t_lwp) == NULL) 3208 return (0); 3209 3210 return ((uint64_t)lwp->lwp_errno); 3211 #else 3212 return (curthread->td_errno); 3213 #endif 3214 } 3215 #if !defined(sun) 3216 case DIF_VAR_CPU: { 3217 return curcpu; 3218 } 3219 #endif 3220 default: 3221 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3222 return (0); 3223 } 3224 } 3225 3226 /* 3227 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3228 * Notice that we don't bother validating the proper number of arguments or 3229 * their types in the tuple stack. This isn't needed because all argument 3230 * interpretation is safe because of our load safety -- the worst that can 3231 * happen is that a bogus program can obtain bogus results. 
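 *
 * As a purely illustrative example, a hypothetical clause such as
 *
 *	fbt::vn_open:entry { self->len = strlen(execname); }
 *
 * compiles the strlen() call into a DIF_OP_CALL with subr set to
 * DIF_SUBR_STRLEN; its argument is pushed onto the tuple stack and
 * handed to this function, which assumes nothing about it beyond load
 * safety.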
3232 */ 3233 static void 3234 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3235 dtrace_key_t *tupregs, int nargs, 3236 dtrace_mstate_t *mstate, dtrace_state_t *state) 3237 { 3238 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3239 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3240 dtrace_vstate_t *vstate = &state->dts_vstate; 3241 3242 #if defined(sun) 3243 union { 3244 mutex_impl_t mi; 3245 uint64_t mx; 3246 } m; 3247 3248 union { 3249 krwlock_t ri; 3250 uintptr_t rw; 3251 } r; 3252 #else 3253 struct thread *lowner; 3254 union { 3255 struct lock_object *li; 3256 uintptr_t lx; 3257 } l; 3258 #endif 3259 3260 switch (subr) { 3261 case DIF_SUBR_RAND: 3262 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3263 break; 3264 3265 #if defined(sun) 3266 case DIF_SUBR_MUTEX_OWNED: 3267 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3268 mstate, vstate)) { 3269 regs[rd] = 0; 3270 break; 3271 } 3272 3273 m.mx = dtrace_load64(tupregs[0].dttk_value); 3274 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3275 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3276 else 3277 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3278 break; 3279 3280 case DIF_SUBR_MUTEX_OWNER: 3281 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3282 mstate, vstate)) { 3283 regs[rd] = 0; 3284 break; 3285 } 3286 3287 m.mx = dtrace_load64(tupregs[0].dttk_value); 3288 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3289 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3290 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3291 else 3292 regs[rd] = 0; 3293 break; 3294 3295 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3296 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3297 mstate, vstate)) { 3298 regs[rd] = 0; 3299 break; 3300 } 3301 3302 m.mx = dtrace_load64(tupregs[0].dttk_value); 3303 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3304 break; 3305 3306 case DIF_SUBR_MUTEX_TYPE_SPIN: 3307 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3308 mstate, vstate)) { 3309 regs[rd] = 0; 3310 break; 3311 } 3312 3313 m.mx = dtrace_load64(tupregs[0].dttk_value); 3314 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3315 break; 3316 3317 case DIF_SUBR_RW_READ_HELD: { 3318 uintptr_t tmp; 3319 3320 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3321 mstate, vstate)) { 3322 regs[rd] = 0; 3323 break; 3324 } 3325 3326 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3327 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3328 break; 3329 } 3330 3331 case DIF_SUBR_RW_WRITE_HELD: 3332 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3333 mstate, vstate)) { 3334 regs[rd] = 0; 3335 break; 3336 } 3337 3338 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3339 regs[rd] = _RW_WRITE_HELD(&r.ri); 3340 break; 3341 3342 case DIF_SUBR_RW_ISWRITER: 3343 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3344 mstate, vstate)) { 3345 regs[rd] = 0; 3346 break; 3347 } 3348 3349 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3350 regs[rd] = _RW_ISWRITER(&r.ri); 3351 break; 3352 3353 #else 3354 case DIF_SUBR_MUTEX_OWNED: 3355 if (!dtrace_canload(tupregs[0].dttk_value, 3356 sizeof (struct lock_object), mstate, vstate)) { 3357 regs[rd] = 0; 3358 break; 3359 } 3360 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3361 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3362 break; 3363 3364 case DIF_SUBR_MUTEX_OWNER: 3365 if (!dtrace_canload(tupregs[0].dttk_value, 3366 sizeof (struct lock_object), mstate, vstate)) { 3367 regs[rd] = 0; 3368 break; 3369 } 3370 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3371 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3372 regs[rd] = (uintptr_t)lowner; 3373 break; 3374 3375 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3376 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3377 mstate, vstate)) { 3378 regs[rd] = 0; 3379 break; 3380 } 3381 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3382 /* XXX - should be only LC_SLEEPABLE? */ 3383 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3384 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3385 break; 3386 3387 case DIF_SUBR_MUTEX_TYPE_SPIN: 3388 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3389 mstate, vstate)) { 3390 regs[rd] = 0; 3391 break; 3392 } 3393 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3394 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3395 break; 3396 3397 case DIF_SUBR_RW_READ_HELD: 3398 case DIF_SUBR_SX_SHARED_HELD: 3399 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3400 mstate, vstate)) { 3401 regs[rd] = 0; 3402 break; 3403 } 3404 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3405 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3406 lowner == NULL; 3407 break; 3408 3409 case DIF_SUBR_RW_WRITE_HELD: 3410 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3411 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3412 mstate, vstate)) { 3413 regs[rd] = 0; 3414 break; 3415 } 3416 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3417 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3418 regs[rd] = (lowner == curthread); 3419 break; 3420 3421 case DIF_SUBR_RW_ISWRITER: 3422 case DIF_SUBR_SX_ISEXCLUSIVE: 3423 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3424 mstate, vstate)) { 3425 regs[rd] = 0; 3426 break; 3427 } 3428 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3429 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3430 lowner != NULL; 3431 break; 3432 #endif /* ! defined(sun) */ 3433 3434 case DIF_SUBR_BCOPY: { 3435 /* 3436 * We need to be sure that the destination is in the scratch 3437 * region -- no other region is allowed. 3438 */ 3439 uintptr_t src = tupregs[0].dttk_value; 3440 uintptr_t dest = tupregs[1].dttk_value; 3441 size_t size = tupregs[2].dttk_value; 3442 3443 if (!dtrace_inscratch(dest, size, mstate)) { 3444 *flags |= CPU_DTRACE_BADADDR; 3445 *illval = regs[rd]; 3446 break; 3447 } 3448 3449 if (!dtrace_canload(src, size, mstate, vstate)) { 3450 regs[rd] = 0; 3451 break; 3452 } 3453 3454 dtrace_bcopy((void *)src, (void *)dest, size); 3455 break; 3456 } 3457 3458 case DIF_SUBR_ALLOCA: 3459 case DIF_SUBR_COPYIN: { 3460 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3461 uint64_t size = 3462 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3463 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3464 3465 /* 3466 * This action doesn't require any credential checks since 3467 * probes will not activate in user contexts to which the 3468 * enabling user does not have permissions. 3469 */ 3470 3471 /* 3472 * Rounding up the user allocation size could have overflowed 3473 * a large, bogus allocation (like -1ULL) to 0. 
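 *
 * For example, if dtms_scratch_ptr happens to be 4 bytes shy of 8-byte
 * alignment and a bogus alloca() of (size_t)-1 is requested, then
 * dest - dtms_scratch_ptr is 4 and scratch_size wraps around to 3;
 * the scratch_size < size comparison below is what catches this case.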
3474 */ 3475 if (scratch_size < size || 3476 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3477 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3478 regs[rd] = 0; 3479 break; 3480 } 3481 3482 if (subr == DIF_SUBR_COPYIN) { 3483 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3484 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3485 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3486 } 3487 3488 mstate->dtms_scratch_ptr += scratch_size; 3489 regs[rd] = dest; 3490 break; 3491 } 3492 3493 case DIF_SUBR_COPYINTO: { 3494 uint64_t size = tupregs[1].dttk_value; 3495 uintptr_t dest = tupregs[2].dttk_value; 3496 3497 /* 3498 * This action doesn't require any credential checks since 3499 * probes will not activate in user contexts to which the 3500 * enabling user does not have permissions. 3501 */ 3502 if (!dtrace_inscratch(dest, size, mstate)) { 3503 *flags |= CPU_DTRACE_BADADDR; 3504 *illval = regs[rd]; 3505 break; 3506 } 3507 3508 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3509 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3510 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3511 break; 3512 } 3513 3514 case DIF_SUBR_COPYINSTR: { 3515 uintptr_t dest = mstate->dtms_scratch_ptr; 3516 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3517 3518 if (nargs > 1 && tupregs[1].dttk_value < size) 3519 size = tupregs[1].dttk_value + 1; 3520 3521 /* 3522 * This action doesn't require any credential checks since 3523 * probes will not activate in user contexts to which the 3524 * enabling user does not have permissions. 3525 */ 3526 if (!DTRACE_INSCRATCH(mstate, size)) { 3527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3528 regs[rd] = 0; 3529 break; 3530 } 3531 3532 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3533 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3534 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3535 3536 ((char *)dest)[size - 1] = '\0'; 3537 mstate->dtms_scratch_ptr += size; 3538 regs[rd] = dest; 3539 break; 3540 } 3541 3542 #if defined(sun) 3543 case DIF_SUBR_MSGSIZE: 3544 case DIF_SUBR_MSGDSIZE: { 3545 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3546 uintptr_t wptr, rptr; 3547 size_t count = 0; 3548 int cont = 0; 3549 3550 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3551 3552 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3553 vstate)) { 3554 regs[rd] = 0; 3555 break; 3556 } 3557 3558 wptr = dtrace_loadptr(baddr + 3559 offsetof(mblk_t, b_wptr)); 3560 3561 rptr = dtrace_loadptr(baddr + 3562 offsetof(mblk_t, b_rptr)); 3563 3564 if (wptr < rptr) { 3565 *flags |= CPU_DTRACE_BADADDR; 3566 *illval = tupregs[0].dttk_value; 3567 break; 3568 } 3569 3570 daddr = dtrace_loadptr(baddr + 3571 offsetof(mblk_t, b_datap)); 3572 3573 baddr = dtrace_loadptr(baddr + 3574 offsetof(mblk_t, b_cont)); 3575 3576 /* 3577 * We want to prevent against denial-of-service here, 3578 * so we're only going to search the list for 3579 * dtrace_msgdsize_max mblks. 
3580 */ 3581 if (cont++ > dtrace_msgdsize_max) { 3582 *flags |= CPU_DTRACE_ILLOP; 3583 break; 3584 } 3585 3586 if (subr == DIF_SUBR_MSGDSIZE) { 3587 if (dtrace_load8(daddr + 3588 offsetof(dblk_t, db_type)) != M_DATA) 3589 continue; 3590 } 3591 3592 count += wptr - rptr; 3593 } 3594 3595 if (!(*flags & CPU_DTRACE_FAULT)) 3596 regs[rd] = count; 3597 3598 break; 3599 } 3600 #endif 3601 3602 case DIF_SUBR_PROGENYOF: { 3603 pid_t pid = tupregs[0].dttk_value; 3604 proc_t *p; 3605 int rval = 0; 3606 3607 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3608 3609 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3610 #if defined(sun) 3611 if (p->p_pidp->pid_id == pid) { 3612 #else 3613 if (p->p_pid == pid) { 3614 #endif 3615 rval = 1; 3616 break; 3617 } 3618 } 3619 3620 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3621 3622 regs[rd] = rval; 3623 break; 3624 } 3625 3626 case DIF_SUBR_SPECULATION: 3627 regs[rd] = dtrace_speculation(state); 3628 break; 3629 3630 case DIF_SUBR_COPYOUT: { 3631 uintptr_t kaddr = tupregs[0].dttk_value; 3632 uintptr_t uaddr = tupregs[1].dttk_value; 3633 uint64_t size = tupregs[2].dttk_value; 3634 3635 if (!dtrace_destructive_disallow && 3636 dtrace_priv_proc_control(state) && 3637 !dtrace_istoxic(kaddr, size)) { 3638 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3639 dtrace_copyout(kaddr, uaddr, size, flags); 3640 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3641 } 3642 break; 3643 } 3644 3645 case DIF_SUBR_COPYOUTSTR: { 3646 uintptr_t kaddr = tupregs[0].dttk_value; 3647 uintptr_t uaddr = tupregs[1].dttk_value; 3648 uint64_t size = tupregs[2].dttk_value; 3649 3650 if (!dtrace_destructive_disallow && 3651 dtrace_priv_proc_control(state) && 3652 !dtrace_istoxic(kaddr, size)) { 3653 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3654 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3655 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3656 } 3657 break; 3658 } 3659 3660 case DIF_SUBR_STRLEN: { 3661 size_t sz; 3662 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3663 sz = dtrace_strlen((char *)addr, 3664 state->dts_options[DTRACEOPT_STRSIZE]); 3665 3666 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3667 regs[rd] = 0; 3668 break; 3669 } 3670 3671 regs[rd] = sz; 3672 3673 break; 3674 } 3675 3676 case DIF_SUBR_STRCHR: 3677 case DIF_SUBR_STRRCHR: { 3678 /* 3679 * We're going to iterate over the string looking for the 3680 * specified character. We will iterate until we have reached 3681 * the string length or we have found the character. If this 3682 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3683 * of the specified character instead of the first. 3684 */ 3685 uintptr_t saddr = tupregs[0].dttk_value; 3686 uintptr_t addr = tupregs[0].dttk_value; 3687 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3688 char c, target = (char)tupregs[1].dttk_value; 3689 3690 for (regs[rd] = 0; addr < limit; addr++) { 3691 if ((c = dtrace_load8(addr)) == target) { 3692 regs[rd] = addr; 3693 3694 if (subr == DIF_SUBR_STRCHR) 3695 break; 3696 } 3697 3698 if (c == '\0') 3699 break; 3700 } 3701 3702 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3703 regs[rd] = 0; 3704 break; 3705 } 3706 3707 break; 3708 } 3709 3710 case DIF_SUBR_STRSTR: 3711 case DIF_SUBR_INDEX: 3712 case DIF_SUBR_RINDEX: { 3713 /* 3714 * We're going to iterate over the string looking for the 3715 * specified string. We will iterate until we have reached 3716 * the string length or we have found the string. 
(Yes, this 3717 * is done in the most naive way possible -- but considering 3718 * that the string we're searching for is likely to be 3719 * relatively short, the complexity of Rabin-Karp or similar 3720 * hardly seems merited.) 3721 */ 3722 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3723 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3724 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3725 size_t len = dtrace_strlen(addr, size); 3726 size_t sublen = dtrace_strlen(substr, size); 3727 char *limit = addr + len, *orig = addr; 3728 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3729 int inc = 1; 3730 3731 regs[rd] = notfound; 3732 3733 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3734 regs[rd] = 0; 3735 break; 3736 } 3737 3738 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3739 vstate)) { 3740 regs[rd] = 0; 3741 break; 3742 } 3743 3744 /* 3745 * strstr() and index()/rindex() have similar semantics if 3746 * both strings are the empty string: strstr() returns a 3747 * pointer to the (empty) string, and index() and rindex() 3748 * both return index 0 (regardless of any position argument). 3749 */ 3750 if (sublen == 0 && len == 0) { 3751 if (subr == DIF_SUBR_STRSTR) 3752 regs[rd] = (uintptr_t)addr; 3753 else 3754 regs[rd] = 0; 3755 break; 3756 } 3757 3758 if (subr != DIF_SUBR_STRSTR) { 3759 if (subr == DIF_SUBR_RINDEX) { 3760 limit = orig - 1; 3761 addr += len; 3762 inc = -1; 3763 } 3764 3765 /* 3766 * Both index() and rindex() take an optional position 3767 * argument that denotes the starting position. 3768 */ 3769 if (nargs == 3) { 3770 int64_t pos = (int64_t)tupregs[2].dttk_value; 3771 3772 /* 3773 * If the position argument to index() is 3774 * negative, Perl implicitly clamps it at 3775 * zero. This semantic is a little surprising 3776 * given the special meaning of negative 3777 * positions to similar Perl functions like 3778 * substr(), but it appears to reflect a 3779 * notion that index() can start from a 3780 * negative index and increment its way up to 3781 * the string. Given this notion, Perl's 3782 * rindex() is at least self-consistent in 3783 * that it implicitly clamps positions greater 3784 * than the string length to be the string 3785 * length. Where Perl completely loses 3786 * coherence, however, is when the specified 3787 * substring is the empty string (""). In 3788 * this case, even if the position is 3789 * negative, rindex() returns 0 -- and even if 3790 * the position is greater than the length, 3791 * index() returns the string length. These 3792 * semantics violate the notion that index() 3793 * should never return a value less than the 3794 * specified position and that rindex() should 3795 * never return a value greater than the 3796 * specified position. (One assumes that 3797 * these semantics are artifacts of Perl's 3798 * implementation and not the results of 3799 * deliberate design -- it beggars belief that 3800 * even Larry Wall could desire such oddness.) 3801 * While in the abstract one would wish for 3802 * consistent position semantics across 3803 * substr(), index() and rindex() -- or at the 3804 * very least self-consistent position 3805 * semantics for index() and rindex() -- we 3806 * instead opt to keep with the extant Perl 3807 * semantics, in all their broken glory. (Do 3808 * we have more desire to maintain Perl's 3809 * semantics than Perl does? Probably.) 
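 *
 * To make the clamping concrete (purely illustrative values):
 *
 *	index("chicago", "c", -5)	is 0 (negative position clamped to 0)
 *	index("chicago", "", 100)	is 7 (past the end with an empty
 *					     substring: the string length)
 *	rindex("chicago", "c", 100)	is 3 (position clamped to the length)
 *	rindex("chicago", "", -1)	is 0 (negative position with an empty
 *					     substring)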
3810 */ 3811 if (subr == DIF_SUBR_RINDEX) { 3812 if (pos < 0) { 3813 if (sublen == 0) 3814 regs[rd] = 0; 3815 break; 3816 } 3817 3818 if (pos > len) 3819 pos = len; 3820 } else { 3821 if (pos < 0) 3822 pos = 0; 3823 3824 if (pos >= len) { 3825 if (sublen == 0) 3826 regs[rd] = len; 3827 break; 3828 } 3829 } 3830 3831 addr = orig + pos; 3832 } 3833 } 3834 3835 for (regs[rd] = notfound; addr != limit; addr += inc) { 3836 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3837 if (subr != DIF_SUBR_STRSTR) { 3838 /* 3839 * As D index() and rindex() are 3840 * modeled on Perl (and not on awk), 3841 * we return a zero-based (and not a 3842 * one-based) index. (For you Perl 3843 * weenies: no, we're not going to add 3844 * $[ -- and shouldn't you be at a con 3845 * or something?) 3846 */ 3847 regs[rd] = (uintptr_t)(addr - orig); 3848 break; 3849 } 3850 3851 ASSERT(subr == DIF_SUBR_STRSTR); 3852 regs[rd] = (uintptr_t)addr; 3853 break; 3854 } 3855 } 3856 3857 break; 3858 } 3859 3860 case DIF_SUBR_STRTOK: { 3861 uintptr_t addr = tupregs[0].dttk_value; 3862 uintptr_t tokaddr = tupregs[1].dttk_value; 3863 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3864 uintptr_t limit, toklimit = tokaddr + size; 3865 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3866 char *dest = (char *)mstate->dtms_scratch_ptr; 3867 int i; 3868 3869 /* 3870 * Check both the token buffer and (later) the input buffer, 3871 * since both could be non-scratch addresses. 3872 */ 3873 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3874 regs[rd] = 0; 3875 break; 3876 } 3877 3878 if (!DTRACE_INSCRATCH(mstate, size)) { 3879 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3880 regs[rd] = 0; 3881 break; 3882 } 3883 3884 if (addr == 0) { 3885 /* 3886 * If the address specified is NULL, we use our saved 3887 * strtok pointer from the mstate. Note that this 3888 * means that the saved strtok pointer is _only_ 3889 * valid within multiple enablings of the same probe -- 3890 * it behaves like an implicit clause-local variable. 3891 */ 3892 addr = mstate->dtms_strtok; 3893 } else { 3894 /* 3895 * If the user-specified address is non-NULL we must 3896 * access check it. This is the only time we have 3897 * a chance to do so, since this address may reside 3898 * in the string table of this clause-- future calls 3899 * (when we fetch addr from mstate->dtms_strtok) 3900 * would fail this access check. 3901 */ 3902 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3903 regs[rd] = 0; 3904 break; 3905 } 3906 } 3907 3908 /* 3909 * First, zero the token map, and then process the token 3910 * string -- setting a bit in the map for every character 3911 * found in the token string. 3912 */ 3913 for (i = 0; i < sizeof (tokmap); i++) 3914 tokmap[i] = 0; 3915 3916 for (; tokaddr < toklimit; tokaddr++) { 3917 if ((c = dtrace_load8(tokaddr)) == '\0') 3918 break; 3919 3920 ASSERT((c >> 3) < sizeof (tokmap)); 3921 tokmap[c >> 3] |= (1 << (c & 0x7)); 3922 } 3923 3924 for (limit = addr + size; addr < limit; addr++) { 3925 /* 3926 * We're looking for a character that is _not_ contained 3927 * in the token string. 3928 */ 3929 if ((c = dtrace_load8(addr)) == '\0') 3930 break; 3931 3932 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3933 break; 3934 } 3935 3936 if (c == '\0') { 3937 /* 3938 * We reached the end of the string without finding 3939 * any character that was not in the token string. 3940 * We return NULL in this case, and we set the saved 3941 * address to NULL as well. 
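 * (As an aside on the mechanism: the token map filled in above is a
 * 256-bit membership set, with character c mapping to bit (c & 0x7)
 * of tokmap[c >> 3]; '/' (0x2f), for example, lands in bit 7 of
 * tokmap[5].)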
3942 */ 3943 regs[rd] = 0; 3944 mstate->dtms_strtok = 0; 3945 break; 3946 } 3947 3948 /* 3949 * From here on, we're copying into the destination string. 3950 */ 3951 for (i = 0; addr < limit && i < size - 1; addr++) { 3952 if ((c = dtrace_load8(addr)) == '\0') 3953 break; 3954 3955 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3956 break; 3957 3958 ASSERT(i < size); 3959 dest[i++] = c; 3960 } 3961 3962 ASSERT(i < size); 3963 dest[i] = '\0'; 3964 regs[rd] = (uintptr_t)dest; 3965 mstate->dtms_scratch_ptr += size; 3966 mstate->dtms_strtok = addr; 3967 break; 3968 } 3969 3970 case DIF_SUBR_SUBSTR: { 3971 uintptr_t s = tupregs[0].dttk_value; 3972 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3973 char *d = (char *)mstate->dtms_scratch_ptr; 3974 int64_t index = (int64_t)tupregs[1].dttk_value; 3975 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3976 size_t len = dtrace_strlen((char *)s, size); 3977 int64_t i = 0; 3978 3979 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3980 regs[rd] = 0; 3981 break; 3982 } 3983 3984 if (!DTRACE_INSCRATCH(mstate, size)) { 3985 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3986 regs[rd] = 0; 3987 break; 3988 } 3989 3990 if (nargs <= 2) 3991 remaining = (int64_t)size; 3992 3993 if (index < 0) { 3994 index += len; 3995 3996 if (index < 0 && index + remaining > 0) { 3997 remaining += index; 3998 index = 0; 3999 } 4000 } 4001 4002 if (index >= len || index < 0) { 4003 remaining = 0; 4004 } else if (remaining < 0) { 4005 remaining += len - index; 4006 } else if (index + remaining > size) { 4007 remaining = size - index; 4008 } 4009 4010 for (i = 0; i < remaining; i++) { 4011 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4012 break; 4013 } 4014 4015 d[i] = '\0'; 4016 4017 mstate->dtms_scratch_ptr += size; 4018 regs[rd] = (uintptr_t)d; 4019 break; 4020 } 4021 4022 #if defined(sun) 4023 case DIF_SUBR_GETMAJOR: 4024 #ifdef _LP64 4025 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4026 #else 4027 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4028 #endif 4029 break; 4030 4031 case DIF_SUBR_GETMINOR: 4032 #ifdef _LP64 4033 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4034 #else 4035 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4036 #endif 4037 break; 4038 4039 case DIF_SUBR_DDI_PATHNAME: { 4040 /* 4041 * This one is a galactic mess. We are going to roughly 4042 * emulate ddi_pathname(), but it's made more complicated 4043 * by the fact that we (a) want to include the minor name and 4044 * (b) must proceed iteratively instead of recursively. 4045 */ 4046 uintptr_t dest = mstate->dtms_scratch_ptr; 4047 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4048 char *start = (char *)dest, *end = start + size - 1; 4049 uintptr_t daddr = tupregs[0].dttk_value; 4050 int64_t minor = (int64_t)tupregs[1].dttk_value; 4051 char *s; 4052 int i, len, depth = 0; 4053 4054 /* 4055 * Due to all the pointer jumping we do and context we must 4056 * rely upon, we just mandate that the user must have kernel 4057 * read privileges to use this routine. 4058 */ 4059 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4060 *flags |= CPU_DTRACE_KPRIV; 4061 *illval = daddr; 4062 regs[rd] = 0; 4063 } 4064 4065 if (!DTRACE_INSCRATCH(mstate, size)) { 4066 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4067 regs[rd] = 0; 4068 break; 4069 } 4070 4071 *end = '\0'; 4072 4073 /* 4074 * We want to have a name for the minor. In order to do this, 4075 * we need to walk the minor list from the devinfo. 
We want 4076 * to be sure that we don't infinitely walk a circular list, 4077 * so we check for circularity by sending a scout pointer 4078 * ahead two elements for every element that we iterate over; 4079 * if the list is circular, these will ultimately point to the 4080 * same element. You may recognize this little trick as the 4081 * answer to a stupid interview question -- one that always 4082 * seems to be asked by those who had to have it laboriously 4083 * explained to them, and who can't even concisely describe 4084 * the conditions under which one would be forced to resort to 4085 * this technique. Needless to say, those conditions are 4086 * found here -- and probably only here. Is this the only use 4087 * of this infamous trick in shipping, production code? If it 4088 * isn't, it probably should be... 4089 */ 4090 if (minor != -1) { 4091 uintptr_t maddr = dtrace_loadptr(daddr + 4092 offsetof(struct dev_info, devi_minor)); 4093 4094 uintptr_t next = offsetof(struct ddi_minor_data, next); 4095 uintptr_t name = offsetof(struct ddi_minor_data, 4096 d_minor) + offsetof(struct ddi_minor, name); 4097 uintptr_t dev = offsetof(struct ddi_minor_data, 4098 d_minor) + offsetof(struct ddi_minor, dev); 4099 uintptr_t scout; 4100 4101 if (maddr != NULL) 4102 scout = dtrace_loadptr(maddr + next); 4103 4104 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4105 uint64_t m; 4106 #ifdef _LP64 4107 m = dtrace_load64(maddr + dev) & MAXMIN64; 4108 #else 4109 m = dtrace_load32(maddr + dev) & MAXMIN; 4110 #endif 4111 if (m != minor) { 4112 maddr = dtrace_loadptr(maddr + next); 4113 4114 if (scout == NULL) 4115 continue; 4116 4117 scout = dtrace_loadptr(scout + next); 4118 4119 if (scout == NULL) 4120 continue; 4121 4122 scout = dtrace_loadptr(scout + next); 4123 4124 if (scout == NULL) 4125 continue; 4126 4127 if (scout == maddr) { 4128 *flags |= CPU_DTRACE_ILLOP; 4129 break; 4130 } 4131 4132 continue; 4133 } 4134 4135 /* 4136 * We have the minor data. Now we need to 4137 * copy the minor's name into the end of the 4138 * pathname. 4139 */ 4140 s = (char *)dtrace_loadptr(maddr + name); 4141 len = dtrace_strlen(s, size); 4142 4143 if (*flags & CPU_DTRACE_FAULT) 4144 break; 4145 4146 if (len != 0) { 4147 if ((end -= (len + 1)) < start) 4148 break; 4149 4150 *end = ':'; 4151 } 4152 4153 for (i = 1; i <= len; i++) 4154 end[i] = dtrace_load8((uintptr_t)s++); 4155 break; 4156 } 4157 } 4158 4159 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4160 ddi_node_state_t devi_state; 4161 4162 devi_state = dtrace_load32(daddr + 4163 offsetof(struct dev_info, devi_node_state)); 4164 4165 if (*flags & CPU_DTRACE_FAULT) 4166 break; 4167 4168 if (devi_state >= DS_INITIALIZED) { 4169 s = (char *)dtrace_loadptr(daddr + 4170 offsetof(struct dev_info, devi_addr)); 4171 len = dtrace_strlen(s, size); 4172 4173 if (*flags & CPU_DTRACE_FAULT) 4174 break; 4175 4176 if (len != 0) { 4177 if ((end -= (len + 1)) < start) 4178 break; 4179 4180 *end = '@'; 4181 } 4182 4183 for (i = 1; i <= len; i++) 4184 end[i] = dtrace_load8((uintptr_t)s++); 4185 } 4186 4187 /* 4188 * Now for the node name... 4189 */ 4190 s = (char *)dtrace_loadptr(daddr + 4191 offsetof(struct dev_info, devi_node_name)); 4192 4193 daddr = dtrace_loadptr(daddr + 4194 offsetof(struct dev_info, devi_parent)); 4195 4196 /* 4197 * If our parent is NULL (that is, if we're the root 4198 * node), we're going to use the special path 4199 * "devices". 
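 * Since the path is assembled right to left, the finished string is
 * roughly of the form "/devices/<node>@<addr>/.../<leaf>@<addr>:<minor>",
 * with the ":<minor>" suffix present only when a minor name was found
 * above and each "@<addr>" present only for initialized nodes.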
4200 */ 4201 if (daddr == 0) 4202 s = "devices"; 4203 4204 len = dtrace_strlen(s, size); 4205 if (*flags & CPU_DTRACE_FAULT) 4206 break; 4207 4208 if ((end -= (len + 1)) < start) 4209 break; 4210 4211 for (i = 1; i <= len; i++) 4212 end[i] = dtrace_load8((uintptr_t)s++); 4213 *end = '/'; 4214 4215 if (depth++ > dtrace_devdepth_max) { 4216 *flags |= CPU_DTRACE_ILLOP; 4217 break; 4218 } 4219 } 4220 4221 if (end < start) 4222 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4223 4224 if (daddr == 0) { 4225 regs[rd] = (uintptr_t)end; 4226 mstate->dtms_scratch_ptr += size; 4227 } 4228 4229 break; 4230 } 4231 #endif 4232 4233 case DIF_SUBR_STRJOIN: { 4234 char *d = (char *)mstate->dtms_scratch_ptr; 4235 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4236 uintptr_t s1 = tupregs[0].dttk_value; 4237 uintptr_t s2 = tupregs[1].dttk_value; 4238 int i = 0; 4239 4240 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4241 !dtrace_strcanload(s2, size, mstate, vstate)) { 4242 regs[rd] = 0; 4243 break; 4244 } 4245 4246 if (!DTRACE_INSCRATCH(mstate, size)) { 4247 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4248 regs[rd] = 0; 4249 break; 4250 } 4251 4252 for (;;) { 4253 if (i >= size) { 4254 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4255 regs[rd] = 0; 4256 break; 4257 } 4258 4259 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4260 i--; 4261 break; 4262 } 4263 } 4264 4265 for (;;) { 4266 if (i >= size) { 4267 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4268 regs[rd] = 0; 4269 break; 4270 } 4271 4272 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4273 break; 4274 } 4275 4276 if (i < size) { 4277 mstate->dtms_scratch_ptr += i; 4278 regs[rd] = (uintptr_t)d; 4279 } 4280 4281 break; 4282 } 4283 4284 case DIF_SUBR_LLTOSTR: { 4285 int64_t i = (int64_t)tupregs[0].dttk_value; 4286 int64_t val = i < 0 ? 
i * -1 : i; 4287 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4288 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4289 4290 if (!DTRACE_INSCRATCH(mstate, size)) { 4291 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4292 regs[rd] = 0; 4293 break; 4294 } 4295 4296 for (*end-- = '\0'; val; val /= 10) 4297 *end-- = '0' + (val % 10); 4298 4299 if (i == 0) 4300 *end-- = '0'; 4301 4302 if (i < 0) 4303 *end-- = '-'; 4304 4305 regs[rd] = (uintptr_t)end + 1; 4306 mstate->dtms_scratch_ptr += size; 4307 break; 4308 } 4309 4310 case DIF_SUBR_HTONS: 4311 case DIF_SUBR_NTOHS: 4312 #if BYTE_ORDER == BIG_ENDIAN 4313 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4314 #else 4315 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4316 #endif 4317 break; 4318 4319 4320 case DIF_SUBR_HTONL: 4321 case DIF_SUBR_NTOHL: 4322 #if BYTE_ORDER == BIG_ENDIAN 4323 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4324 #else 4325 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4326 #endif 4327 break; 4328 4329 4330 case DIF_SUBR_HTONLL: 4331 case DIF_SUBR_NTOHLL: 4332 #if BYTE_ORDER == BIG_ENDIAN 4333 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4334 #else 4335 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4336 #endif 4337 break; 4338 4339 4340 case DIF_SUBR_DIRNAME: 4341 case DIF_SUBR_BASENAME: { 4342 char *dest = (char *)mstate->dtms_scratch_ptr; 4343 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4344 uintptr_t src = tupregs[0].dttk_value; 4345 int i, j, len = dtrace_strlen((char *)src, size); 4346 int lastbase = -1, firstbase = -1, lastdir = -1; 4347 int start, end; 4348 4349 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4350 regs[rd] = 0; 4351 break; 4352 } 4353 4354 if (!DTRACE_INSCRATCH(mstate, size)) { 4355 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4356 regs[rd] = 0; 4357 break; 4358 } 4359 4360 /* 4361 * The basename and dirname for a zero-length string is 4362 * defined to be "." 4363 */ 4364 if (len == 0) { 4365 len = 1; 4366 src = (uintptr_t)"."; 4367 } 4368 4369 /* 4370 * Start from the back of the string, moving back toward the 4371 * front until we see a character that isn't a slash. That 4372 * character is the last character in the basename. 4373 */ 4374 for (i = len - 1; i >= 0; i--) { 4375 if (dtrace_load8(src + i) != '/') 4376 break; 4377 } 4378 4379 if (i >= 0) 4380 lastbase = i; 4381 4382 /* 4383 * Starting from the last character in the basename, move 4384 * towards the front until we find a slash. The character 4385 * that we processed immediately before that is the first 4386 * character in the basename. 4387 */ 4388 for (; i >= 0; i--) { 4389 if (dtrace_load8(src + i) == '/') 4390 break; 4391 } 4392 4393 if (i >= 0) 4394 firstbase = i + 1; 4395 4396 /* 4397 * Now keep going until we find a non-slash character. That 4398 * character is the last character in the dirname. 4399 */ 4400 for (; i >= 0; i--) { 4401 if (dtrace_load8(src + i) != '/') 4402 break; 4403 } 4404 4405 if (i >= 0) 4406 lastdir = i; 4407 4408 ASSERT(!(lastbase == -1 && firstbase != -1)); 4409 ASSERT(!(firstbase == -1 && lastdir != -1)); 4410 4411 if (lastbase == -1) { 4412 /* 4413 * We didn't find a non-slash character. We know that 4414 * the length is non-zero, so the whole string must be 4415 * slashes. In either the dirname or the basename 4416 * case, we return '/'. 4417 */ 4418 ASSERT(firstbase == -1); 4419 firstbase = lastbase = lastdir = 0; 4420 } 4421 4422 if (firstbase == -1) { 4423 /* 4424 * The entire string consists only of a basename 4425 * component. 
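 * (For example, "a.out": dirname() of it is "." and basename() of it
 * is "a.out" itself.)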
If we're looking for dirname, we need 4426 * to change our string to be just "."; if we're 4427 * looking for a basename, we'll just set the first 4428 * character of the basename to be 0. 4429 */ 4430 if (subr == DIF_SUBR_DIRNAME) { 4431 ASSERT(lastdir == -1); 4432 src = (uintptr_t)"."; 4433 lastdir = 0; 4434 } else { 4435 firstbase = 0; 4436 } 4437 } 4438 4439 if (subr == DIF_SUBR_DIRNAME) { 4440 if (lastdir == -1) { 4441 /* 4442 * We know that we have a slash in the name -- 4443 * or lastdir would be set to 0, above. And 4444 * because lastdir is -1, we know that this 4445 * slash must be the first character. (That 4446 * is, the full string must be of the form 4447 * "/basename".) In this case, the last 4448 * character of the directory name is 0. 4449 */ 4450 lastdir = 0; 4451 } 4452 4453 start = 0; 4454 end = lastdir; 4455 } else { 4456 ASSERT(subr == DIF_SUBR_BASENAME); 4457 ASSERT(firstbase != -1 && lastbase != -1); 4458 start = firstbase; 4459 end = lastbase; 4460 } 4461 4462 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4463 dest[j] = dtrace_load8(src + i); 4464 4465 dest[j] = '\0'; 4466 regs[rd] = (uintptr_t)dest; 4467 mstate->dtms_scratch_ptr += size; 4468 break; 4469 } 4470 4471 case DIF_SUBR_CLEANPATH: { 4472 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4473 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4474 uintptr_t src = tupregs[0].dttk_value; 4475 int i = 0, j = 0; 4476 4477 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4478 regs[rd] = 0; 4479 break; 4480 } 4481 4482 if (!DTRACE_INSCRATCH(mstate, size)) { 4483 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4484 regs[rd] = 0; 4485 break; 4486 } 4487 4488 /* 4489 * Move forward, loading each character. 4490 */ 4491 do { 4492 c = dtrace_load8(src + i++); 4493 next: 4494 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4495 break; 4496 4497 if (c != '/') { 4498 dest[j++] = c; 4499 continue; 4500 } 4501 4502 c = dtrace_load8(src + i++); 4503 4504 if (c == '/') { 4505 /* 4506 * We have two slashes -- we can just advance 4507 * to the next character. 4508 */ 4509 goto next; 4510 } 4511 4512 if (c != '.') { 4513 /* 4514 * This is not "." and it's not ".." -- we can 4515 * just store the "/" and this character and 4516 * drive on. 4517 */ 4518 dest[j++] = '/'; 4519 dest[j++] = c; 4520 continue; 4521 } 4522 4523 c = dtrace_load8(src + i++); 4524 4525 if (c == '/') { 4526 /* 4527 * This is a "/./" component. We're not going 4528 * to store anything in the destination buffer; 4529 * we're just going to go to the next component. 4530 */ 4531 goto next; 4532 } 4533 4534 if (c != '.') { 4535 /* 4536 * This is not ".." -- we can just store the 4537 * "/." and this character and continue 4538 * processing. 4539 */ 4540 dest[j++] = '/'; 4541 dest[j++] = '.'; 4542 dest[j++] = c; 4543 continue; 4544 } 4545 4546 c = dtrace_load8(src + i++); 4547 4548 if (c != '/' && c != '\0') { 4549 /* 4550 * This is not ".." -- it's "..[mumble]". 4551 * We'll store the "/.." and this character 4552 * and continue processing. 4553 */ 4554 dest[j++] = '/'; 4555 dest[j++] = '.'; 4556 dest[j++] = '.'; 4557 dest[j++] = c; 4558 continue; 4559 } 4560 4561 /* 4562 * This is "/../" or "/..\0". We need to back up 4563 * our destination pointer until we find a "/". 
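 *
 * As a worked example of the overall effect (illustrative only; it
 * follows from the cases handled above): cleanpath("/a/./b//../c")
 * yields "/a/c" -- the "//" collapses, the "/./" disappears, and the
 * "/../" erases the "b" component that precedes it.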
4564 */ 4565 i--; 4566 while (j != 0 && dest[--j] != '/') 4567 continue; 4568 4569 if (c == '\0') 4570 dest[++j] = '/'; 4571 } while (c != '\0'); 4572 4573 dest[j] = '\0'; 4574 regs[rd] = (uintptr_t)dest; 4575 mstate->dtms_scratch_ptr += size; 4576 break; 4577 } 4578 4579 case DIF_SUBR_INET_NTOA: 4580 case DIF_SUBR_INET_NTOA6: 4581 case DIF_SUBR_INET_NTOP: { 4582 size_t size; 4583 int af, argi, i; 4584 char *base, *end; 4585 4586 if (subr == DIF_SUBR_INET_NTOP) { 4587 af = (int)tupregs[0].dttk_value; 4588 argi = 1; 4589 } else { 4590 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4591 argi = 0; 4592 } 4593 4594 if (af == AF_INET) { 4595 ipaddr_t ip4; 4596 uint8_t *ptr8, val; 4597 4598 /* 4599 * Safely load the IPv4 address. 4600 */ 4601 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4602 4603 /* 4604 * Check an IPv4 string will fit in scratch. 4605 */ 4606 size = INET_ADDRSTRLEN; 4607 if (!DTRACE_INSCRATCH(mstate, size)) { 4608 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4609 regs[rd] = 0; 4610 break; 4611 } 4612 base = (char *)mstate->dtms_scratch_ptr; 4613 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4614 4615 /* 4616 * Stringify as a dotted decimal quad. 4617 */ 4618 *end-- = '\0'; 4619 ptr8 = (uint8_t *)&ip4; 4620 for (i = 3; i >= 0; i--) { 4621 val = ptr8[i]; 4622 4623 if (val == 0) { 4624 *end-- = '0'; 4625 } else { 4626 for (; val; val /= 10) { 4627 *end-- = '0' + (val % 10); 4628 } 4629 } 4630 4631 if (i > 0) 4632 *end-- = '.'; 4633 } 4634 ASSERT(end + 1 >= base); 4635 4636 } else if (af == AF_INET6) { 4637 struct in6_addr ip6; 4638 int firstzero, tryzero, numzero, v6end; 4639 uint16_t val; 4640 const char digits[] = "0123456789abcdef"; 4641 4642 /* 4643 * Stringify using RFC 1884 convention 2 - 16 bit 4644 * hexadecimal values with a zero-run compression. 4645 * Lower case hexadecimal digits are used. 4646 * eg, fe80::214:4fff:fe0b:76c8. 4647 * The IPv4 embedded form is returned for inet_ntop, 4648 * just the IPv4 string is returned for inet_ntoa6. 4649 */ 4650 4651 /* 4652 * Safely load the IPv6 address. 4653 */ 4654 dtrace_bcopy( 4655 (void *)(uintptr_t)tupregs[argi].dttk_value, 4656 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4657 4658 /* 4659 * Check an IPv6 string will fit in scratch. 4660 */ 4661 size = INET6_ADDRSTRLEN; 4662 if (!DTRACE_INSCRATCH(mstate, size)) { 4663 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4664 regs[rd] = 0; 4665 break; 4666 } 4667 base = (char *)mstate->dtms_scratch_ptr; 4668 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4669 *end-- = '\0'; 4670 4671 /* 4672 * Find the longest run of 16 bit zero values 4673 * for the single allowed zero compression - "::". 
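 * For example, given fe80::214:4fff:fe0b:76c8 the scan below leaves
 * firstzero at byte offset 2 and numzero at 6 -- the three zero
 * groups that collapse to "::" when the string is built.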
4674 */ 4675 firstzero = -1; 4676 tryzero = -1; 4677 numzero = 1; 4678 for (i = 0; i < sizeof (struct in6_addr); i++) { 4679 #if defined(sun) 4680 if (ip6._S6_un._S6_u8[i] == 0 && 4681 #else 4682 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4683 #endif 4684 tryzero == -1 && i % 2 == 0) { 4685 tryzero = i; 4686 continue; 4687 } 4688 4689 if (tryzero != -1 && 4690 #if defined(sun) 4691 (ip6._S6_un._S6_u8[i] != 0 || 4692 #else 4693 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4694 #endif 4695 i == sizeof (struct in6_addr) - 1)) { 4696 4697 if (i - tryzero <= numzero) { 4698 tryzero = -1; 4699 continue; 4700 } 4701 4702 firstzero = tryzero; 4703 numzero = i - i % 2 - tryzero; 4704 tryzero = -1; 4705 4706 #if defined(sun) 4707 if (ip6._S6_un._S6_u8[i] == 0 && 4708 #else 4709 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4710 #endif 4711 i == sizeof (struct in6_addr) - 1) 4712 numzero += 2; 4713 } 4714 } 4715 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4716 4717 /* 4718 * Check for an IPv4 embedded address. 4719 */ 4720 v6end = sizeof (struct in6_addr) - 2; 4721 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4722 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4723 for (i = sizeof (struct in6_addr) - 1; 4724 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4725 ASSERT(end >= base); 4726 4727 #if defined(sun) 4728 val = ip6._S6_un._S6_u8[i]; 4729 #else 4730 val = ip6.__u6_addr.__u6_addr8[i]; 4731 #endif 4732 4733 if (val == 0) { 4734 *end-- = '0'; 4735 } else { 4736 for (; val; val /= 10) { 4737 *end-- = '0' + val % 10; 4738 } 4739 } 4740 4741 if (i > DTRACE_V4MAPPED_OFFSET) 4742 *end-- = '.'; 4743 } 4744 4745 if (subr == DIF_SUBR_INET_NTOA6) 4746 goto inetout; 4747 4748 /* 4749 * Set v6end to skip the IPv4 address that 4750 * we have already stringified. 4751 */ 4752 v6end = 10; 4753 } 4754 4755 /* 4756 * Build the IPv6 string by working through the 4757 * address in reverse. 4758 */ 4759 for (i = v6end; i >= 0; i -= 2) { 4760 ASSERT(end >= base); 4761 4762 if (i == firstzero + numzero - 2) { 4763 *end-- = ':'; 4764 *end-- = ':'; 4765 i -= numzero - 2; 4766 continue; 4767 } 4768 4769 if (i < 14 && i != firstzero - 2) 4770 *end-- = ':'; 4771 4772 #if defined(sun) 4773 val = (ip6._S6_un._S6_u8[i] << 8) + 4774 ip6._S6_un._S6_u8[i + 1]; 4775 #else 4776 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4777 ip6.__u6_addr.__u6_addr8[i + 1]; 4778 #endif 4779 4780 if (val == 0) { 4781 *end-- = '0'; 4782 } else { 4783 for (; val; val /= 16) { 4784 *end-- = digits[val % 16]; 4785 } 4786 } 4787 } 4788 ASSERT(end + 1 >= base); 4789 4790 } else { 4791 /* 4792 * The user didn't use AF_INET or AF_INET6.
4793 */ 4794 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4795 regs[rd] = 0; 4796 break; 4797 } 4798 4799 inetout: regs[rd] = (uintptr_t)end + 1; 4800 mstate->dtms_scratch_ptr += size; 4801 break; 4802 } 4803 4804 case DIF_SUBR_MEMREF: { 4805 uintptr_t size = 2 * sizeof(uintptr_t); 4806 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4807 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4808 4809 /* address and length */ 4810 memref[0] = tupregs[0].dttk_value; 4811 memref[1] = tupregs[1].dttk_value; 4812 4813 regs[rd] = (uintptr_t) memref; 4814 mstate->dtms_scratch_ptr += scratch_size; 4815 break; 4816 } 4817 4818 case DIF_SUBR_TYPEREF: { 4819 uintptr_t size = 4 * sizeof(uintptr_t); 4820 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4821 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4822 4823 /* address, num_elements, type_str, type_len */ 4824 typeref[0] = tupregs[0].dttk_value; 4825 typeref[1] = tupregs[1].dttk_value; 4826 typeref[2] = tupregs[2].dttk_value; 4827 typeref[3] = tupregs[3].dttk_value; 4828 4829 regs[rd] = (uintptr_t) typeref; 4830 mstate->dtms_scratch_ptr += scratch_size; 4831 break; 4832 } 4833 } 4834 } 4835 4836 /* 4837 * Emulate the execution of DTrace IR instructions specified by the given 4838 * DIF object. This function is deliberately void of assertions as all of 4839 * the necessary checks are handled by a call to dtrace_difo_validate(). 4840 */ 4841 static uint64_t 4842 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4843 dtrace_vstate_t *vstate, dtrace_state_t *state) 4844 { 4845 const dif_instr_t *text = difo->dtdo_buf; 4846 const uint_t textlen = difo->dtdo_len; 4847 const char *strtab = difo->dtdo_strtab; 4848 const uint64_t *inttab = difo->dtdo_inttab; 4849 4850 uint64_t rval = 0; 4851 dtrace_statvar_t *svar; 4852 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4853 dtrace_difv_t *v; 4854 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4855 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4856 4857 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4858 uint64_t regs[DIF_DIR_NREGS]; 4859 uint64_t *tmp; 4860 4861 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4862 int64_t cc_r; 4863 uint_t pc = 0, id, opc = 0; 4864 uint8_t ttop = 0; 4865 dif_instr_t instr; 4866 uint_t r1, r2, rd; 4867 4868 /* 4869 * We stash the current DIF object into the machine state: we need it 4870 * for subsequent access checking. 
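 * (For example, dtrace_canload() can consult it to permit loads from
 * the DIFO's own string table.)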
4871 */ 4872 mstate->dtms_difo = difo; 4873 4874 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4875 4876 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4877 opc = pc; 4878 4879 instr = text[pc++]; 4880 r1 = DIF_INSTR_R1(instr); 4881 r2 = DIF_INSTR_R2(instr); 4882 rd = DIF_INSTR_RD(instr); 4883 4884 switch (DIF_INSTR_OP(instr)) { 4885 case DIF_OP_OR: 4886 regs[rd] = regs[r1] | regs[r2]; 4887 break; 4888 case DIF_OP_XOR: 4889 regs[rd] = regs[r1] ^ regs[r2]; 4890 break; 4891 case DIF_OP_AND: 4892 regs[rd] = regs[r1] & regs[r2]; 4893 break; 4894 case DIF_OP_SLL: 4895 regs[rd] = regs[r1] << regs[r2]; 4896 break; 4897 case DIF_OP_SRL: 4898 regs[rd] = regs[r1] >> regs[r2]; 4899 break; 4900 case DIF_OP_SUB: 4901 regs[rd] = regs[r1] - regs[r2]; 4902 break; 4903 case DIF_OP_ADD: 4904 regs[rd] = regs[r1] + regs[r2]; 4905 break; 4906 case DIF_OP_MUL: 4907 regs[rd] = regs[r1] * regs[r2]; 4908 break; 4909 case DIF_OP_SDIV: 4910 if (regs[r2] == 0) { 4911 regs[rd] = 0; 4912 *flags |= CPU_DTRACE_DIVZERO; 4913 } else { 4914 regs[rd] = (int64_t)regs[r1] / 4915 (int64_t)regs[r2]; 4916 } 4917 break; 4918 4919 case DIF_OP_UDIV: 4920 if (regs[r2] == 0) { 4921 regs[rd] = 0; 4922 *flags |= CPU_DTRACE_DIVZERO; 4923 } else { 4924 regs[rd] = regs[r1] / regs[r2]; 4925 } 4926 break; 4927 4928 case DIF_OP_SREM: 4929 if (regs[r2] == 0) { 4930 regs[rd] = 0; 4931 *flags |= CPU_DTRACE_DIVZERO; 4932 } else { 4933 regs[rd] = (int64_t)regs[r1] % 4934 (int64_t)regs[r2]; 4935 } 4936 break; 4937 4938 case DIF_OP_UREM: 4939 if (regs[r2] == 0) { 4940 regs[rd] = 0; 4941 *flags |= CPU_DTRACE_DIVZERO; 4942 } else { 4943 regs[rd] = regs[r1] % regs[r2]; 4944 } 4945 break; 4946 4947 case DIF_OP_NOT: 4948 regs[rd] = ~regs[r1]; 4949 break; 4950 case DIF_OP_MOV: 4951 regs[rd] = regs[r1]; 4952 break; 4953 case DIF_OP_CMP: 4954 cc_r = regs[r1] - regs[r2]; 4955 cc_n = cc_r < 0; 4956 cc_z = cc_r == 0; 4957 cc_v = 0; 4958 cc_c = regs[r1] < regs[r2]; 4959 break; 4960 case DIF_OP_TST: 4961 cc_n = cc_v = cc_c = 0; 4962 cc_z = regs[r1] == 0; 4963 break; 4964 case DIF_OP_BA: 4965 pc = DIF_INSTR_LABEL(instr); 4966 break; 4967 case DIF_OP_BE: 4968 if (cc_z) 4969 pc = DIF_INSTR_LABEL(instr); 4970 break; 4971 case DIF_OP_BNE: 4972 if (cc_z == 0) 4973 pc = DIF_INSTR_LABEL(instr); 4974 break; 4975 case DIF_OP_BG: 4976 if ((cc_z | (cc_n ^ cc_v)) == 0) 4977 pc = DIF_INSTR_LABEL(instr); 4978 break; 4979 case DIF_OP_BGU: 4980 if ((cc_c | cc_z) == 0) 4981 pc = DIF_INSTR_LABEL(instr); 4982 break; 4983 case DIF_OP_BGE: 4984 if ((cc_n ^ cc_v) == 0) 4985 pc = DIF_INSTR_LABEL(instr); 4986 break; 4987 case DIF_OP_BGEU: 4988 if (cc_c == 0) 4989 pc = DIF_INSTR_LABEL(instr); 4990 break; 4991 case DIF_OP_BL: 4992 if (cc_n ^ cc_v) 4993 pc = DIF_INSTR_LABEL(instr); 4994 break; 4995 case DIF_OP_BLU: 4996 if (cc_c) 4997 pc = DIF_INSTR_LABEL(instr); 4998 break; 4999 case DIF_OP_BLE: 5000 if (cc_z | (cc_n ^ cc_v)) 5001 pc = DIF_INSTR_LABEL(instr); 5002 break; 5003 case DIF_OP_BLEU: 5004 if (cc_c | cc_z) 5005 pc = DIF_INSTR_LABEL(instr); 5006 break; 5007 case DIF_OP_RLDSB: 5008 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5009 *flags |= CPU_DTRACE_KPRIV; 5010 *illval = regs[r1]; 5011 break; 5012 } 5013 /*FALLTHROUGH*/ 5014 case DIF_OP_LDSB: 5015 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5016 break; 5017 case DIF_OP_RLDSH: 5018 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5019 *flags |= CPU_DTRACE_KPRIV; 5020 *illval = regs[r1]; 5021 break; 5022 } 5023 /*FALLTHROUGH*/ 5024 case DIF_OP_LDSH: 5025 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5026 break; 
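		/*
		 * The DIF_OP_RLD* cases above and below are "restricted"
		 * loads: before falling through to the corresponding
		 * DIF_OP_LD* case, they insist that the source address lie
		 * within memory that DTrace itself controls (as judged by
		 * dtrace_canstore()); on failure they set CPU_DTRACE_KPRIV,
		 * record the offending address in *illval and leave the
		 * destination register untouched.
		 */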
5027 case DIF_OP_RLDSW: 5028 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5029 *flags |= CPU_DTRACE_KPRIV; 5030 *illval = regs[r1]; 5031 break; 5032 } 5033 /*FALLTHROUGH*/ 5034 case DIF_OP_LDSW: 5035 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5036 break; 5037 case DIF_OP_RLDUB: 5038 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5039 *flags |= CPU_DTRACE_KPRIV; 5040 *illval = regs[r1]; 5041 break; 5042 } 5043 /*FALLTHROUGH*/ 5044 case DIF_OP_LDUB: 5045 regs[rd] = dtrace_load8(regs[r1]); 5046 break; 5047 case DIF_OP_RLDUH: 5048 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5049 *flags |= CPU_DTRACE_KPRIV; 5050 *illval = regs[r1]; 5051 break; 5052 } 5053 /*FALLTHROUGH*/ 5054 case DIF_OP_LDUH: 5055 regs[rd] = dtrace_load16(regs[r1]); 5056 break; 5057 case DIF_OP_RLDUW: 5058 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5059 *flags |= CPU_DTRACE_KPRIV; 5060 *illval = regs[r1]; 5061 break; 5062 } 5063 /*FALLTHROUGH*/ 5064 case DIF_OP_LDUW: 5065 regs[rd] = dtrace_load32(regs[r1]); 5066 break; 5067 case DIF_OP_RLDX: 5068 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5069 *flags |= CPU_DTRACE_KPRIV; 5070 *illval = regs[r1]; 5071 break; 5072 } 5073 /*FALLTHROUGH*/ 5074 case DIF_OP_LDX: 5075 regs[rd] = dtrace_load64(regs[r1]); 5076 break; 5077 case DIF_OP_ULDSB: 5078 regs[rd] = (int8_t) 5079 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5080 break; 5081 case DIF_OP_ULDSH: 5082 regs[rd] = (int16_t) 5083 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5084 break; 5085 case DIF_OP_ULDSW: 5086 regs[rd] = (int32_t) 5087 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5088 break; 5089 case DIF_OP_ULDUB: 5090 regs[rd] = 5091 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5092 break; 5093 case DIF_OP_ULDUH: 5094 regs[rd] = 5095 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5096 break; 5097 case DIF_OP_ULDUW: 5098 regs[rd] = 5099 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5100 break; 5101 case DIF_OP_ULDX: 5102 regs[rd] = 5103 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5104 break; 5105 case DIF_OP_RET: 5106 rval = regs[rd]; 5107 pc = textlen; 5108 break; 5109 case DIF_OP_NOP: 5110 break; 5111 case DIF_OP_SETX: 5112 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5113 break; 5114 case DIF_OP_SETS: 5115 regs[rd] = (uint64_t)(uintptr_t) 5116 (strtab + DIF_INSTR_STRING(instr)); 5117 break; 5118 case DIF_OP_SCMP: { 5119 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5120 uintptr_t s1 = regs[r1]; 5121 uintptr_t s2 = regs[r2]; 5122 5123 if (s1 != 0 && 5124 !dtrace_strcanload(s1, sz, mstate, vstate)) 5125 break; 5126 if (s2 != 0 && 5127 !dtrace_strcanload(s2, sz, mstate, vstate)) 5128 break; 5129 5130 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5131 5132 cc_n = cc_r < 0; 5133 cc_z = cc_r == 0; 5134 cc_v = cc_c = 0; 5135 break; 5136 } 5137 case DIF_OP_LDGA: 5138 regs[rd] = dtrace_dif_variable(mstate, state, 5139 r1, regs[r2]); 5140 break; 5141 case DIF_OP_LDGS: 5142 id = DIF_INSTR_VAR(instr); 5143 5144 if (id >= DIF_VAR_OTHER_UBASE) { 5145 uintptr_t a; 5146 5147 id -= DIF_VAR_OTHER_UBASE; 5148 svar = vstate->dtvs_globals[id]; 5149 ASSERT(svar != NULL); 5150 v = &svar->dtsv_var; 5151 5152 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5153 regs[rd] = svar->dtsv_data; 5154 break; 5155 } 5156 5157 a = (uintptr_t)svar->dtsv_data; 5158 5159 if (*(uint8_t *)a == UINT8_MAX) { 5160 /* 5161 * If the 0th byte is set to UINT8_MAX 5162 * then this is to be treated as a 5163 * reference to a NULL variable. 
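 * (The first byte of a by-ref global's backing store is this is-NULL
 * flag, written by the DIF_OP_STGS case below; the data proper starts
 * sizeof (uint64_t) bytes in, which is why the non-NULL case below
 * returns a + sizeof (uint64_t).)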
5164 */ 5165 regs[rd] = 0; 5166 } else { 5167 regs[rd] = a + sizeof (uint64_t); 5168 } 5169 5170 break; 5171 } 5172 5173 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5174 break; 5175 5176 case DIF_OP_STGS: 5177 id = DIF_INSTR_VAR(instr); 5178 5179 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5180 id -= DIF_VAR_OTHER_UBASE; 5181 5182 svar = vstate->dtvs_globals[id]; 5183 ASSERT(svar != NULL); 5184 v = &svar->dtsv_var; 5185 5186 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5187 uintptr_t a = (uintptr_t)svar->dtsv_data; 5188 5189 ASSERT(a != 0); 5190 ASSERT(svar->dtsv_size != 0); 5191 5192 if (regs[rd] == 0) { 5193 *(uint8_t *)a = UINT8_MAX; 5194 break; 5195 } else { 5196 *(uint8_t *)a = 0; 5197 a += sizeof (uint64_t); 5198 } 5199 if (!dtrace_vcanload( 5200 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5201 mstate, vstate)) 5202 break; 5203 5204 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5205 (void *)a, &v->dtdv_type); 5206 break; 5207 } 5208 5209 svar->dtsv_data = regs[rd]; 5210 break; 5211 5212 case DIF_OP_LDTA: 5213 /* 5214 * There are no DTrace built-in thread-local arrays at 5215 * present. This opcode is saved for future work. 5216 */ 5217 *flags |= CPU_DTRACE_ILLOP; 5218 regs[rd] = 0; 5219 break; 5220 5221 case DIF_OP_LDLS: 5222 id = DIF_INSTR_VAR(instr); 5223 5224 if (id < DIF_VAR_OTHER_UBASE) { 5225 /* 5226 * For now, this has no meaning. 5227 */ 5228 regs[rd] = 0; 5229 break; 5230 } 5231 5232 id -= DIF_VAR_OTHER_UBASE; 5233 5234 ASSERT(id < vstate->dtvs_nlocals); 5235 ASSERT(vstate->dtvs_locals != NULL); 5236 5237 svar = vstate->dtvs_locals[id]; 5238 ASSERT(svar != NULL); 5239 v = &svar->dtsv_var; 5240 5241 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5242 uintptr_t a = (uintptr_t)svar->dtsv_data; 5243 size_t sz = v->dtdv_type.dtdt_size; 5244 5245 sz += sizeof (uint64_t); 5246 ASSERT(svar->dtsv_size == NCPU * sz); 5247 a += curcpu * sz; 5248 5249 if (*(uint8_t *)a == UINT8_MAX) { 5250 /* 5251 * If the 0th byte is set to UINT8_MAX 5252 * then this is to be treated as a 5253 * reference to a NULL variable. 
5254 */ 5255 regs[rd] = 0; 5256 } else { 5257 regs[rd] = a + sizeof (uint64_t); 5258 } 5259 5260 break; 5261 } 5262 5263 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5264 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5265 regs[rd] = tmp[curcpu]; 5266 break; 5267 5268 case DIF_OP_STLS: 5269 id = DIF_INSTR_VAR(instr); 5270 5271 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5272 id -= DIF_VAR_OTHER_UBASE; 5273 ASSERT(id < vstate->dtvs_nlocals); 5274 5275 ASSERT(vstate->dtvs_locals != NULL); 5276 svar = vstate->dtvs_locals[id]; 5277 ASSERT(svar != NULL); 5278 v = &svar->dtsv_var; 5279 5280 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5281 uintptr_t a = (uintptr_t)svar->dtsv_data; 5282 size_t sz = v->dtdv_type.dtdt_size; 5283 5284 sz += sizeof (uint64_t); 5285 ASSERT(svar->dtsv_size == NCPU * sz); 5286 a += curcpu * sz; 5287 5288 if (regs[rd] == 0) { 5289 *(uint8_t *)a = UINT8_MAX; 5290 break; 5291 } else { 5292 *(uint8_t *)a = 0; 5293 a += sizeof (uint64_t); 5294 } 5295 5296 if (!dtrace_vcanload( 5297 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5298 mstate, vstate)) 5299 break; 5300 5301 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5302 (void *)a, &v->dtdv_type); 5303 break; 5304 } 5305 5306 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5307 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5308 tmp[curcpu] = regs[rd]; 5309 break; 5310 5311 case DIF_OP_LDTS: { 5312 dtrace_dynvar_t *dvar; 5313 dtrace_key_t *key; 5314 5315 id = DIF_INSTR_VAR(instr); 5316 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5317 id -= DIF_VAR_OTHER_UBASE; 5318 v = &vstate->dtvs_tlocals[id]; 5319 5320 key = &tupregs[DIF_DTR_NREGS]; 5321 key[0].dttk_value = (uint64_t)id; 5322 key[0].dttk_size = 0; 5323 DTRACE_TLS_THRKEY(key[1].dttk_value); 5324 key[1].dttk_size = 0; 5325 5326 dvar = dtrace_dynvar(dstate, 2, key, 5327 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5328 mstate, vstate); 5329 5330 if (dvar == NULL) { 5331 regs[rd] = 0; 5332 break; 5333 } 5334 5335 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5336 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5337 } else { 5338 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5339 } 5340 5341 break; 5342 } 5343 5344 case DIF_OP_STTS: { 5345 dtrace_dynvar_t *dvar; 5346 dtrace_key_t *key; 5347 5348 id = DIF_INSTR_VAR(instr); 5349 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5350 id -= DIF_VAR_OTHER_UBASE; 5351 5352 key = &tupregs[DIF_DTR_NREGS]; 5353 key[0].dttk_value = (uint64_t)id; 5354 key[0].dttk_size = 0; 5355 DTRACE_TLS_THRKEY(key[1].dttk_value); 5356 key[1].dttk_size = 0; 5357 v = &vstate->dtvs_tlocals[id]; 5358 5359 dvar = dtrace_dynvar(dstate, 2, key, 5360 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5361 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5362 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5363 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5364 5365 /* 5366 * Given that we're storing to thread-local data, 5367 * we need to flush our predicate cache. 
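 * (A cached predicate result can itself depend on a thread-local
 * variable -- a predicate such as /self->traced/, say -- so once the
 * variable is stored to, the cached outcome can no longer be trusted.)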
5368 */ 5369 curthread->t_predcache = 0; 5370 5371 if (dvar == NULL) 5372 break; 5373 5374 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5375 if (!dtrace_vcanload( 5376 (void *)(uintptr_t)regs[rd], 5377 &v->dtdv_type, mstate, vstate)) 5378 break; 5379 5380 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5381 dvar->dtdv_data, &v->dtdv_type); 5382 } else { 5383 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5384 } 5385 5386 break; 5387 } 5388 5389 case DIF_OP_SRA: 5390 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5391 break; 5392 5393 case DIF_OP_CALL: 5394 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5395 regs, tupregs, ttop, mstate, state); 5396 break; 5397 5398 case DIF_OP_PUSHTR: 5399 if (ttop == DIF_DTR_NREGS) { 5400 *flags |= CPU_DTRACE_TUPOFLOW; 5401 break; 5402 } 5403 5404 if (r1 == DIF_TYPE_STRING) { 5405 /* 5406 * If this is a string type and the size is 0, 5407 * we'll use the system-wide default string 5408 * size. Note that we are _not_ looking at 5409 * the value of the DTRACEOPT_STRSIZE option; 5410 * had this been set, we would expect to have 5411 * a non-zero size value in the "pushtr". 5412 */ 5413 tupregs[ttop].dttk_size = 5414 dtrace_strlen((char *)(uintptr_t)regs[rd], 5415 regs[r2] ? regs[r2] : 5416 dtrace_strsize_default) + 1; 5417 } else { 5418 tupregs[ttop].dttk_size = regs[r2]; 5419 } 5420 5421 tupregs[ttop++].dttk_value = regs[rd]; 5422 break; 5423 5424 case DIF_OP_PUSHTV: 5425 if (ttop == DIF_DTR_NREGS) { 5426 *flags |= CPU_DTRACE_TUPOFLOW; 5427 break; 5428 } 5429 5430 tupregs[ttop].dttk_value = regs[rd]; 5431 tupregs[ttop++].dttk_size = 0; 5432 break; 5433 5434 case DIF_OP_POPTS: 5435 if (ttop != 0) 5436 ttop--; 5437 break; 5438 5439 case DIF_OP_FLUSHTS: 5440 ttop = 0; 5441 break; 5442 5443 case DIF_OP_LDGAA: 5444 case DIF_OP_LDTAA: { 5445 dtrace_dynvar_t *dvar; 5446 dtrace_key_t *key = tupregs; 5447 uint_t nkeys = ttop; 5448 5449 id = DIF_INSTR_VAR(instr); 5450 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5451 id -= DIF_VAR_OTHER_UBASE; 5452 5453 key[nkeys].dttk_value = (uint64_t)id; 5454 key[nkeys++].dttk_size = 0; 5455 5456 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5457 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5458 key[nkeys++].dttk_size = 0; 5459 v = &vstate->dtvs_tlocals[id]; 5460 } else { 5461 v = &vstate->dtvs_globals[id]->dtsv_var; 5462 } 5463 5464 dvar = dtrace_dynvar(dstate, nkeys, key, 5465 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5466 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5467 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5468 5469 if (dvar == NULL) { 5470 regs[rd] = 0; 5471 break; 5472 } 5473 5474 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5475 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5476 } else { 5477 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5478 } 5479 5480 break; 5481 } 5482 5483 case DIF_OP_STGAA: 5484 case DIF_OP_STTAA: { 5485 dtrace_dynvar_t *dvar; 5486 dtrace_key_t *key = tupregs; 5487 uint_t nkeys = ttop; 5488 5489 id = DIF_INSTR_VAR(instr); 5490 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5491 id -= DIF_VAR_OTHER_UBASE; 5492 5493 key[nkeys].dttk_value = (uint64_t)id; 5494 key[nkeys++].dttk_size = 0; 5495 5496 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5497 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5498 key[nkeys++].dttk_size = 0; 5499 v = &vstate->dtvs_tlocals[id]; 5500 } else { 5501 v = &vstate->dtvs_globals[id]->dtsv_var; 5502 } 5503 5504 dvar = dtrace_dynvar(dstate, nkeys, key, 5505 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5506 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5507 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5508 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5509 5510 if (dvar == NULL) 5511 break; 5512 5513 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5514 if (!dtrace_vcanload( 5515 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5516 mstate, vstate)) 5517 break; 5518 5519 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5520 dvar->dtdv_data, &v->dtdv_type); 5521 } else { 5522 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5523 } 5524 5525 break; 5526 } 5527 5528 case DIF_OP_ALLOCS: { 5529 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5530 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5531 5532 /* 5533 * Rounding up the user allocation size could have 5534 * overflowed large, bogus allocations (like -1ULL) to 5535 * 0. 5536 */ 5537 if (size < regs[r1] || 5538 !DTRACE_INSCRATCH(mstate, size)) { 5539 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5540 regs[rd] = 0; 5541 break; 5542 } 5543 5544 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5545 mstate->dtms_scratch_ptr += size; 5546 regs[rd] = ptr; 5547 break; 5548 } 5549 5550 case DIF_OP_COPYS: 5551 if (!dtrace_canstore(regs[rd], regs[r2], 5552 mstate, vstate)) { 5553 *flags |= CPU_DTRACE_BADADDR; 5554 *illval = regs[rd]; 5555 break; 5556 } 5557 5558 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5559 break; 5560 5561 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5562 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5563 break; 5564 5565 case DIF_OP_STB: 5566 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5567 *flags |= CPU_DTRACE_BADADDR; 5568 *illval = regs[rd]; 5569 break; 5570 } 5571 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5572 break; 5573 5574 case DIF_OP_STH: 5575 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5576 *flags |= CPU_DTRACE_BADADDR; 5577 *illval = regs[rd]; 5578 break; 5579 } 5580 if (regs[rd] & 1) { 5581 *flags |= CPU_DTRACE_BADALIGN; 5582 *illval = regs[rd]; 5583 break; 5584 } 5585 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5586 break; 5587 5588 case DIF_OP_STW: 5589 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5590 *flags |= CPU_DTRACE_BADADDR; 5591 *illval = regs[rd]; 5592 break; 5593 } 5594 if (regs[rd] & 3) { 5595 *flags |= CPU_DTRACE_BADALIGN; 5596 *illval = regs[rd]; 5597 break; 5598 } 5599 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5600 break; 5601 5602 case DIF_OP_STX: 5603 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5604 *flags |= CPU_DTRACE_BADADDR; 5605 *illval = regs[rd]; 5606 break; 5607 } 5608 if (regs[rd] & 7) { 5609 *flags |= CPU_DTRACE_BADALIGN; 5610 *illval = regs[rd]; 5611 break; 5612 } 5613 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5614 break; 5615 } 5616 } 5617 5618 if (!(*flags & CPU_DTRACE_FAULT)) 5619 return (rval); 5620 5621 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5622 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5623 5624 return (0); 5625 } 5626 5627 static void 5628 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5629 { 5630 dtrace_probe_t *probe = ecb->dte_probe; 5631 dtrace_provider_t *prov = probe->dtpr_provider; 5632 char c[DTRACE_FULLNAMELEN + 80], *str; 5633 char *msg = "dtrace: breakpoint action at probe "; 5634 char *ecbmsg = " (ecb "; 5635 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5636 uintptr_t val = (uintptr_t)ecb; 5637 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5638 5639 if (dtrace_destructive_disallow) 5640 return; 5641 5642 /* 5643 * It's impossible to be taking action on the NULL probe. 
5644 */ 5645 ASSERT(probe != NULL); 5646 5647 /* 5648 * This is a poor man's (destitute man's?) sprintf(): we want to 5649 * print the provider name, module name, function name and name of 5650 * the probe, along with the hex address of the ECB with the breakpoint 5651 * action -- all of which we must place in the character buffer by 5652 * hand. 5653 */ 5654 while (*msg != '\0') 5655 c[i++] = *msg++; 5656 5657 for (str = prov->dtpv_name; *str != '\0'; str++) 5658 c[i++] = *str; 5659 c[i++] = ':'; 5660 5661 for (str = probe->dtpr_mod; *str != '\0'; str++) 5662 c[i++] = *str; 5663 c[i++] = ':'; 5664 5665 for (str = probe->dtpr_func; *str != '\0'; str++) 5666 c[i++] = *str; 5667 c[i++] = ':'; 5668 5669 for (str = probe->dtpr_name; *str != '\0'; str++) 5670 c[i++] = *str; 5671 5672 while (*ecbmsg != '\0') 5673 c[i++] = *ecbmsg++; 5674 5675 while (shift >= 0) { 5676 mask = (uintptr_t)0xf << shift; 5677 5678 if (val >= ((uintptr_t)1 << shift)) 5679 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5680 shift -= 4; 5681 } 5682 5683 c[i++] = ')'; 5684 c[i] = '\0'; 5685 5686 #if defined(sun) 5687 debug_enter(c); 5688 #else 5689 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5690 #endif 5691 } 5692 5693 static void 5694 dtrace_action_panic(dtrace_ecb_t *ecb) 5695 { 5696 dtrace_probe_t *probe = ecb->dte_probe; 5697 5698 /* 5699 * It's impossible to be taking action on the NULL probe. 5700 */ 5701 ASSERT(probe != NULL); 5702 5703 if (dtrace_destructive_disallow) 5704 return; 5705 5706 if (dtrace_panicked != NULL) 5707 return; 5708 5709 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5710 return; 5711 5712 /* 5713 * We won the right to panic. (We want to be sure that only one 5714 * thread calls panic() from dtrace_probe(), and that panic() is 5715 * called exactly once.) 5716 */ 5717 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5718 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5719 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5720 } 5721 5722 static void 5723 dtrace_action_raise(uint64_t sig) 5724 { 5725 if (dtrace_destructive_disallow) 5726 return; 5727 5728 if (sig >= NSIG) { 5729 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5730 return; 5731 } 5732 5733 #if defined(sun) 5734 /* 5735 * raise() has a queue depth of 1 -- we ignore all subsequent 5736 * invocations of the raise() action. 
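 * (If, for example, raise(SIGUSR1) and raise(SIGUSR2) both fire before
 * the thread next takes the pending signal, only SIGUSR1 is delivered.)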
5737 */ 5738 if (curthread->t_dtrace_sig == 0) 5739 curthread->t_dtrace_sig = (uint8_t)sig; 5740 5741 curthread->t_sig_check = 1; 5742 aston(curthread); 5743 #else 5744 struct proc *p = curproc; 5745 PROC_LOCK(p); 5746 kern_psignal(p, sig); 5747 PROC_UNLOCK(p); 5748 #endif 5749 } 5750 5751 static void 5752 dtrace_action_stop(void) 5753 { 5754 if (dtrace_destructive_disallow) 5755 return; 5756 5757 #if defined(sun) 5758 if (!curthread->t_dtrace_stop) { 5759 curthread->t_dtrace_stop = 1; 5760 curthread->t_sig_check = 1; 5761 aston(curthread); 5762 } 5763 #else 5764 struct proc *p = curproc; 5765 PROC_LOCK(p); 5766 kern_psignal(p, SIGSTOP); 5767 PROC_UNLOCK(p); 5768 #endif 5769 } 5770 5771 static void 5772 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5773 { 5774 hrtime_t now; 5775 volatile uint16_t *flags; 5776 #if defined(sun) 5777 cpu_t *cpu = CPU; 5778 #else 5779 cpu_t *cpu = &solaris_cpu[curcpu]; 5780 #endif 5781 5782 if (dtrace_destructive_disallow) 5783 return; 5784 5785 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5786 5787 now = dtrace_gethrtime(); 5788 5789 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5790 /* 5791 * We need to advance the mark to the current time. 5792 */ 5793 cpu->cpu_dtrace_chillmark = now; 5794 cpu->cpu_dtrace_chilled = 0; 5795 } 5796 5797 /* 5798 * Now check to see if the requested chill time would take us over 5799 * the maximum amount of time allowed in the chill interval. (Or 5800 * worse, if the calculation itself induces overflow.) 5801 */ 5802 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5803 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5804 *flags |= CPU_DTRACE_ILLOP; 5805 return; 5806 } 5807 5808 while (dtrace_gethrtime() - now < val) 5809 continue; 5810 5811 /* 5812 * Normally, we assure that the value of the variable "timestamp" does 5813 * not change within an ECB. The presence of chill() represents an 5814 * exception to this rule, however. 5815 */ 5816 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5817 cpu->cpu_dtrace_chilled += val; 5818 } 5819 5820 static void 5821 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5822 uint64_t *buf, uint64_t arg) 5823 { 5824 int nframes = DTRACE_USTACK_NFRAMES(arg); 5825 int strsize = DTRACE_USTACK_STRSIZE(arg); 5826 uint64_t *pcs = &buf[1], *fps; 5827 char *str = (char *)&pcs[nframes]; 5828 int size, offs = 0, i, j; 5829 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5830 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5831 char *sym; 5832 5833 /* 5834 * Should be taking a faster path if string space has not been 5835 * allocated. 5836 */ 5837 ASSERT(strsize != 0); 5838 5839 /* 5840 * We will first allocate some temporary space for the frame pointers. 5841 */ 5842 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5843 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5844 (nframes * sizeof (uint64_t)); 5845 5846 if (!DTRACE_INSCRATCH(mstate, size)) { 5847 /* 5848 * Not enough room for our frame pointers -- need to indicate 5849 * that we ran out of scratch space. 5850 */ 5851 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5852 return; 5853 } 5854 5855 mstate->dtms_scratch_ptr += size; 5856 saved = mstate->dtms_scratch_ptr; 5857 5858 /* 5859 * Now get a stack with both program counters and frame pointers. 
5860 */ 5861 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5862 dtrace_getufpstack(buf, fps, nframes + 1); 5863 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5864 5865 /* 5866 * If that faulted, we're cooked. 5867 */ 5868 if (*flags & CPU_DTRACE_FAULT) 5869 goto out; 5870 5871 /* 5872 * Now we want to walk up the stack, calling the USTACK helper. For 5873 * each iteration, we restore the scratch pointer. 5874 */ 5875 for (i = 0; i < nframes; i++) { 5876 mstate->dtms_scratch_ptr = saved; 5877 5878 if (offs >= strsize) 5879 break; 5880 5881 sym = (char *)(uintptr_t)dtrace_helper( 5882 DTRACE_HELPER_ACTION_USTACK, 5883 mstate, state, pcs[i], fps[i]); 5884 5885 /* 5886 * If we faulted while running the helper, we're going to 5887 * clear the fault and null out the corresponding string. 5888 */ 5889 if (*flags & CPU_DTRACE_FAULT) { 5890 *flags &= ~CPU_DTRACE_FAULT; 5891 str[offs++] = '\0'; 5892 continue; 5893 } 5894 5895 if (sym == NULL) { 5896 str[offs++] = '\0'; 5897 continue; 5898 } 5899 5900 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5901 5902 /* 5903 * Now copy in the string that the helper returned to us. 5904 */ 5905 for (j = 0; offs + j < strsize; j++) { 5906 if ((str[offs + j] = sym[j]) == '\0') 5907 break; 5908 } 5909 5910 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5911 5912 offs += j + 1; 5913 } 5914 5915 if (offs >= strsize) { 5916 /* 5917 * If we didn't have room for all of the strings, we don't 5918 * abort processing -- this needn't be a fatal error -- but we 5919 * still want to increment a counter (dts_stkstroverflows) to 5920 * allow this condition to be warned about. (If this is from 5921 * a jstack() action, it is easily tuned via jstackstrsize.) 5922 */ 5923 dtrace_error(&state->dts_stkstroverflows); 5924 } 5925 5926 while (offs < strsize) 5927 str[offs++] = '\0'; 5928 5929 out: 5930 mstate->dtms_scratch_ptr = old; 5931 } 5932 5933 /* 5934 * If you're looking for the epicenter of DTrace, you just found it. This 5935 * is the function called by the provider to fire a probe -- from which all 5936 * subsequent probe-context DTrace activity emanates. 5937 */ 5938 void 5939 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5940 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5941 { 5942 processorid_t cpuid; 5943 dtrace_icookie_t cookie; 5944 dtrace_probe_t *probe; 5945 dtrace_mstate_t mstate; 5946 dtrace_ecb_t *ecb; 5947 dtrace_action_t *act; 5948 intptr_t offs; 5949 size_t size; 5950 int vtime, onintr; 5951 volatile uint16_t *flags; 5952 hrtime_t now; 5953 5954 if (panicstr != NULL) 5955 return; 5956 5957 #if defined(sun) 5958 /* 5959 * Kick out immediately if this CPU is still being born (in which case 5960 * curthread will be set to -1) or the current thread can't allow 5961 * probes in its current context. 5962 */ 5963 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5964 return; 5965 #endif 5966 5967 cookie = dtrace_interrupt_disable(); 5968 probe = dtrace_probes[id - 1]; 5969 cpuid = curcpu; 5970 onintr = CPU_ON_INTR(CPU); 5971 5972 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5973 probe->dtpr_predcache == curthread->t_predcache) { 5974 /* 5975 * We have hit in the predicate cache; we know that 5976 * this predicate would evaluate to be false. 5977 */ 5978 dtrace_interrupt_enable(cookie); 5979 return; 5980 } 5981 5982 #if defined(sun) 5983 if (panic_quiesce) { 5984 #else 5985 if (panicstr != NULL) { 5986 #endif 5987 /* 5988 * We don't trace anything if we're panicking. 
5989 */ 5990 dtrace_interrupt_enable(cookie); 5991 return; 5992 } 5993 5994 now = dtrace_gethrtime(); 5995 vtime = dtrace_vtime_references != 0; 5996 5997 if (vtime && curthread->t_dtrace_start) 5998 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5999 6000 mstate.dtms_difo = NULL; 6001 mstate.dtms_probe = probe; 6002 mstate.dtms_strtok = 0; 6003 mstate.dtms_arg[0] = arg0; 6004 mstate.dtms_arg[1] = arg1; 6005 mstate.dtms_arg[2] = arg2; 6006 mstate.dtms_arg[3] = arg3; 6007 mstate.dtms_arg[4] = arg4; 6008 6009 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6010 6011 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6012 dtrace_predicate_t *pred = ecb->dte_predicate; 6013 dtrace_state_t *state = ecb->dte_state; 6014 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6015 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6016 dtrace_vstate_t *vstate = &state->dts_vstate; 6017 dtrace_provider_t *prov = probe->dtpr_provider; 6018 int committed = 0; 6019 caddr_t tomax; 6020 6021 /* 6022 * A little subtlety with the following (seemingly innocuous) 6023 * declaration of the automatic 'val': by looking at the 6024 * code, you might think that it could be declared in the 6025 * action processing loop, below. (That is, it's only used in 6026 * the action processing loop.) However, it must be declared 6027 * out of that scope because in the case of DIF expression 6028 * arguments to aggregating actions, one iteration of the 6029 * action loop will use the last iteration's value. 6030 */ 6031 uint64_t val = 0; 6032 6033 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6034 *flags &= ~CPU_DTRACE_ERROR; 6035 6036 if (prov == dtrace_provider) { 6037 /* 6038 * If dtrace itself is the provider of this probe, 6039 * we're only going to continue processing the ECB if 6040 * arg0 (the dtrace_state_t) is equal to the ECB's 6041 * creating state. (This prevents disjoint consumers 6042 * from seeing one another's metaprobes.) 6043 */ 6044 if (arg0 != (uint64_t)(uintptr_t)state) 6045 continue; 6046 } 6047 6048 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6049 /* 6050 * We're not currently active. If our provider isn't 6051 * the dtrace pseudo provider, we're not interested. 6052 */ 6053 if (prov != dtrace_provider) 6054 continue; 6055 6056 /* 6057 * Now we must further check if we are in the BEGIN 6058 * probe. If we are, we will only continue processing 6059 * if we're still in WARMUP -- if one BEGIN enabling 6060 * has invoked the exit() action, we don't want to 6061 * evaluate subsequent BEGIN enablings. 6062 */ 6063 if (probe->dtpr_id == dtrace_probeid_begin && 6064 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6065 ASSERT(state->dts_activity == 6066 DTRACE_ACTIVITY_DRAINING); 6067 continue; 6068 } 6069 } 6070 6071 if (ecb->dte_cond) { 6072 /* 6073 * If the dte_cond bits indicate that this 6074 * consumer is only allowed to see user-mode firings 6075 * of this probe, call the provider's dtps_usermode() 6076 * entry point to check that the probe was fired 6077 * while in a user context. Skip this ECB if that's 6078 * not the case. 6079 */ 6080 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6081 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6082 probe->dtpr_id, probe->dtpr_arg) == 0) 6083 continue; 6084 6085 #if defined(sun) 6086 /* 6087 * This is more subtle than it looks. 
We have to be 6088 * absolutely certain that CRED() isn't going to 6089 * change out from under us so it's only legit to 6090 * examine that structure if we're in constrained 6091 * situations. Currently, the only time we'll do this 6092 * check is if a non-super-user has enabled the 6093 * profile or syscall providers -- providers that 6094 * allow visibility of all processes. For the 6095 * profile case, the check above will ensure that 6096 * we're examining a user context. 6097 */ 6098 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6099 cred_t *cr; 6100 cred_t *s_cr = 6101 ecb->dte_state->dts_cred.dcr_cred; 6102 proc_t *proc; 6103 6104 ASSERT(s_cr != NULL); 6105 6106 if ((cr = CRED()) == NULL || 6107 s_cr->cr_uid != cr->cr_uid || 6108 s_cr->cr_uid != cr->cr_ruid || 6109 s_cr->cr_uid != cr->cr_suid || 6110 s_cr->cr_gid != cr->cr_gid || 6111 s_cr->cr_gid != cr->cr_rgid || 6112 s_cr->cr_gid != cr->cr_sgid || 6113 (proc = ttoproc(curthread)) == NULL || 6114 (proc->p_flag & SNOCD)) 6115 continue; 6116 } 6117 6118 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6119 cred_t *cr; 6120 cred_t *s_cr = 6121 ecb->dte_state->dts_cred.dcr_cred; 6122 6123 ASSERT(s_cr != NULL); 6124 6125 if ((cr = CRED()) == NULL || 6126 s_cr->cr_zone->zone_id != 6127 cr->cr_zone->zone_id) 6128 continue; 6129 } 6130 #endif 6131 } 6132 6133 if (now - state->dts_alive > dtrace_deadman_timeout) { 6134 /* 6135 * We seem to be dead. Unless we (a) have kernel 6136 * destructive permissions (b) have explicitly enabled 6137 * destructive actions and (c) destructive actions have 6138 * not been disabled, we're going to transition into 6139 * the KILLED state, from which no further processing 6140 * on this state will be performed. 6141 */ 6142 if (!dtrace_priv_kernel_destructive(state) || 6143 !state->dts_cred.dcr_destructive || 6144 dtrace_destructive_disallow) { 6145 void *activity = &state->dts_activity; 6146 dtrace_activity_t current; 6147 6148 do { 6149 current = state->dts_activity; 6150 } while (dtrace_cas32(activity, current, 6151 DTRACE_ACTIVITY_KILLED) != current); 6152 6153 continue; 6154 } 6155 } 6156 6157 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6158 ecb->dte_alignment, state, &mstate)) < 0) 6159 continue; 6160 6161 tomax = buf->dtb_tomax; 6162 ASSERT(tomax != NULL); 6163 6164 if (ecb->dte_size != 0) 6165 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6166 6167 mstate.dtms_epid = ecb->dte_epid; 6168 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6169 6170 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6171 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6172 else 6173 mstate.dtms_access = 0; 6174 6175 if (pred != NULL) { 6176 dtrace_difo_t *dp = pred->dtp_difo; 6177 int rval; 6178 6179 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6180 6181 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6182 dtrace_cacheid_t cid = probe->dtpr_predcache; 6183 6184 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6185 /* 6186 * Update the predicate cache...
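 *
 * The cache works in concert with the check at the top of this
 * function: when a cacheable predicate (roughly, one whose outcome
 * depends only on thread-local state) evaluates to false, its cache ID
 * is stashed in curthread->t_predcache.  On the next firing of this
 * probe on the same thread, the early comparison of
 * probe->dtpr_predcache against curthread->t_predcache lets us bail
 * out before any ECB processing is done.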
6187 */ 6188 ASSERT(cid == pred->dtp_cacheid); 6189 curthread->t_predcache = cid; 6190 } 6191 6192 continue; 6193 } 6194 } 6195 6196 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6197 act != NULL; act = act->dta_next) { 6198 size_t valoffs; 6199 dtrace_difo_t *dp; 6200 dtrace_recdesc_t *rec = &act->dta_rec; 6201 6202 size = rec->dtrd_size; 6203 valoffs = offs + rec->dtrd_offset; 6204 6205 if (DTRACEACT_ISAGG(act->dta_kind)) { 6206 uint64_t v = 0xbad; 6207 dtrace_aggregation_t *agg; 6208 6209 agg = (dtrace_aggregation_t *)act; 6210 6211 if ((dp = act->dta_difo) != NULL) 6212 v = dtrace_dif_emulate(dp, 6213 &mstate, vstate, state); 6214 6215 if (*flags & CPU_DTRACE_ERROR) 6216 continue; 6217 6218 /* 6219 * Note that we always pass the expression 6220 * value from the previous iteration of the 6221 * action loop. This value will only be used 6222 * if there is an expression argument to the 6223 * aggregating action, denoted by the 6224 * dtag_hasarg field. 6225 */ 6226 dtrace_aggregate(agg, buf, 6227 offs, aggbuf, v, val); 6228 continue; 6229 } 6230 6231 switch (act->dta_kind) { 6232 case DTRACEACT_STOP: 6233 if (dtrace_priv_proc_destructive(state)) 6234 dtrace_action_stop(); 6235 continue; 6236 6237 case DTRACEACT_BREAKPOINT: 6238 if (dtrace_priv_kernel_destructive(state)) 6239 dtrace_action_breakpoint(ecb); 6240 continue; 6241 6242 case DTRACEACT_PANIC: 6243 if (dtrace_priv_kernel_destructive(state)) 6244 dtrace_action_panic(ecb); 6245 continue; 6246 6247 case DTRACEACT_STACK: 6248 if (!dtrace_priv_kernel(state)) 6249 continue; 6250 6251 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6252 size / sizeof (pc_t), probe->dtpr_aframes, 6253 DTRACE_ANCHORED(probe) ? NULL : 6254 (uint32_t *)arg0); 6255 continue; 6256 6257 case DTRACEACT_JSTACK: 6258 case DTRACEACT_USTACK: 6259 if (!dtrace_priv_proc(state)) 6260 continue; 6261 6262 /* 6263 * See comment in DIF_VAR_PID. 6264 */ 6265 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6266 CPU_ON_INTR(CPU)) { 6267 int depth = DTRACE_USTACK_NFRAMES( 6268 rec->dtrd_arg) + 1; 6269 6270 dtrace_bzero((void *)(tomax + valoffs), 6271 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6272 + depth * sizeof (uint64_t)); 6273 6274 continue; 6275 } 6276 6277 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6278 curproc->p_dtrace_helpers != NULL) { 6279 /* 6280 * This is the slow path -- we have 6281 * allocated string space, and we're 6282 * getting the stack of a process that 6283 * has helpers. Call into a separate 6284 * routine to perform this processing. 
6285 */ 6286 dtrace_action_ustack(&mstate, state, 6287 (uint64_t *)(tomax + valoffs), 6288 rec->dtrd_arg); 6289 continue; 6290 } 6291 6292 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6293 dtrace_getupcstack((uint64_t *) 6294 (tomax + valoffs), 6295 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6296 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6297 continue; 6298 6299 default: 6300 break; 6301 } 6302 6303 dp = act->dta_difo; 6304 ASSERT(dp != NULL); 6305 6306 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6307 6308 if (*flags & CPU_DTRACE_ERROR) 6309 continue; 6310 6311 switch (act->dta_kind) { 6312 case DTRACEACT_SPECULATE: 6313 ASSERT(buf == &state->dts_buffer[cpuid]); 6314 buf = dtrace_speculation_buffer(state, 6315 cpuid, val); 6316 6317 if (buf == NULL) { 6318 *flags |= CPU_DTRACE_DROP; 6319 continue; 6320 } 6321 6322 offs = dtrace_buffer_reserve(buf, 6323 ecb->dte_needed, ecb->dte_alignment, 6324 state, NULL); 6325 6326 if (offs < 0) { 6327 *flags |= CPU_DTRACE_DROP; 6328 continue; 6329 } 6330 6331 tomax = buf->dtb_tomax; 6332 ASSERT(tomax != NULL); 6333 6334 if (ecb->dte_size != 0) 6335 DTRACE_STORE(uint32_t, tomax, offs, 6336 ecb->dte_epid); 6337 continue; 6338 6339 case DTRACEACT_PRINTM: { 6340 /* The DIF returns a 'memref'. */ 6341 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6342 6343 /* Get the size from the memref. */ 6344 size = memref[1]; 6345 6346 /* 6347 * Check if the size exceeds the allocated 6348 * buffer size. 6349 */ 6350 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6351 /* Flag a drop! */ 6352 *flags |= CPU_DTRACE_DROP; 6353 continue; 6354 } 6355 6356 /* Store the size in the buffer first. */ 6357 DTRACE_STORE(uintptr_t, tomax, 6358 valoffs, size); 6359 6360 /* 6361 * Offset the buffer address to the start 6362 * of the data. 6363 */ 6364 valoffs += sizeof(uintptr_t); 6365 6366 /* 6367 * Reset to the memory address rather than 6368 * the memref array, then let the BYREF 6369 * code below do the work to store the 6370 * memory data in the buffer. 6371 */ 6372 val = memref[0]; 6373 break; 6374 } 6375 6376 case DTRACEACT_PRINTT: { 6377 /* The DIF returns a 'typeref'. */ 6378 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6379 char c = '\0' + 1; 6380 size_t s; 6381 6382 /* 6383 * Get the type string length and round it 6384 * up so that the data that follows is 6385 * aligned for easy access. 6386 */ 6387 size_t typs = strlen((char *) typeref[2]) + 1; 6388 typs = roundup(typs, sizeof(uintptr_t)); 6389 6390 /* 6391 * Get the size from the typeref using the 6392 * number of elements and the type size. 6393 */ 6394 size = typeref[1] * typeref[3]; 6395 6396 /* 6397 * Check if the size exceeds the allocated 6398 * buffer size. 6399 */ 6400 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6401 /* Flag a drop! */ 6402 *flags |= CPU_DTRACE_DROP; 6403 continue; 6404 } 6405 6406 /* Store the size in the buffer first. */ 6407 DTRACE_STORE(uintptr_t, tomax, 6408 valoffs, size); 6409 valoffs += sizeof(uintptr_t); 6410 6411 /* Store the type size in the buffer. */ 6412 DTRACE_STORE(uintptr_t, tomax, 6413 valoffs, typeref[3]); 6414 valoffs += sizeof(uintptr_t); 6415 6416 val = typeref[2]; 6417 6418 for (s = 0; s < typs; s++) { 6419 if (c != '\0') 6420 c = dtrace_load8(val++); 6421 6422 DTRACE_STORE(uint8_t, tomax, 6423 valoffs++, c); 6424 } 6425 6426 /* 6427 * Reset to the memory address rather than 6428 * the typeref array, then let the BYREF 6429 * code below do the work to store the 6430 * memory data in the buffer.
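 *
 * As used above, the typeref array is laid out as follows:
 *
 *	typeref[0]	address of the data itself
 *	typeref[1]	number of elements
 *	typeref[2]	address of the type string
 *	typeref[3]	size of a single element
 *
 * so the record emitted here is the total data size, the element
 * size, the (NUL-padded) type string, and finally the raw data.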
6431 */ 6432 val = typeref[0]; 6433 break; 6434 } 6435 6436 case DTRACEACT_CHILL: 6437 if (dtrace_priv_kernel_destructive(state)) 6438 dtrace_action_chill(&mstate, val); 6439 continue; 6440 6441 case DTRACEACT_RAISE: 6442 if (dtrace_priv_proc_destructive(state)) 6443 dtrace_action_raise(val); 6444 continue; 6445 6446 case DTRACEACT_COMMIT: 6447 ASSERT(!committed); 6448 6449 /* 6450 * We need to commit our buffer state. 6451 */ 6452 if (ecb->dte_size) 6453 buf->dtb_offset = offs + ecb->dte_size; 6454 buf = &state->dts_buffer[cpuid]; 6455 dtrace_speculation_commit(state, cpuid, val); 6456 committed = 1; 6457 continue; 6458 6459 case DTRACEACT_DISCARD: 6460 dtrace_speculation_discard(state, cpuid, val); 6461 continue; 6462 6463 case DTRACEACT_DIFEXPR: 6464 case DTRACEACT_LIBACT: 6465 case DTRACEACT_PRINTF: 6466 case DTRACEACT_PRINTA: 6467 case DTRACEACT_SYSTEM: 6468 case DTRACEACT_FREOPEN: 6469 break; 6470 6471 case DTRACEACT_SYM: 6472 case DTRACEACT_MOD: 6473 if (!dtrace_priv_kernel(state)) 6474 continue; 6475 break; 6476 6477 case DTRACEACT_USYM: 6478 case DTRACEACT_UMOD: 6479 case DTRACEACT_UADDR: { 6480 #if defined(sun) 6481 struct pid *pid = curthread->t_procp->p_pidp; 6482 #endif 6483 6484 if (!dtrace_priv_proc(state)) 6485 continue; 6486 6487 DTRACE_STORE(uint64_t, tomax, 6488 #if defined(sun) 6489 valoffs, (uint64_t)pid->pid_id); 6490 #else 6491 valoffs, (uint64_t) curproc->p_pid); 6492 #endif 6493 DTRACE_STORE(uint64_t, tomax, 6494 valoffs + sizeof (uint64_t), val); 6495 6496 continue; 6497 } 6498 6499 case DTRACEACT_EXIT: { 6500 /* 6501 * For the exit action, we are going to attempt 6502 * to atomically set our activity to be 6503 * draining. If this fails (either because 6504 * another CPU has beat us to the exit action, 6505 * or because our current activity is something 6506 * other than ACTIVE or WARMUP), we will 6507 * continue. This assures that the exit action 6508 * can be successfully recorded at most once 6509 * when we're in the ACTIVE state. If we're 6510 * encountering the exit() action while in 6511 * COOLDOWN, however, we want to honor the new 6512 * status code. (We know that we're the only 6513 * thread in COOLDOWN, so there is no race.) 6514 */ 6515 void *activity = &state->dts_activity; 6516 dtrace_activity_t current = state->dts_activity; 6517 6518 if (current == DTRACE_ACTIVITY_COOLDOWN) 6519 break; 6520 6521 if (current != DTRACE_ACTIVITY_WARMUP) 6522 current = DTRACE_ACTIVITY_ACTIVE; 6523 6524 if (dtrace_cas32(activity, current, 6525 DTRACE_ACTIVITY_DRAINING) != current) { 6526 *flags |= CPU_DTRACE_DROP; 6527 continue; 6528 } 6529 6530 break; 6531 } 6532 6533 default: 6534 ASSERT(0); 6535 } 6536 6537 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6538 uintptr_t end = valoffs + size; 6539 6540 if (!dtrace_vcanload((void *)(uintptr_t)val, 6541 &dp->dtdo_rtype, &mstate, vstate)) 6542 continue; 6543 6544 /* 6545 * If this is a string, we're going to only 6546 * load until we find the zero byte -- after 6547 * which we'll store zero bytes. 
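 *
 * For example, with a string slot of size 8 and val pointing at the
 * string "cat", the loop below stores 'c', 'a', 't' followed by five
 * NUL bytes.  If this value is part of an aggregation tuple
 * (dta_intuple is set), we instead stop as soon as the terminating
 * NUL has been stored.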
6548 */ 6549 if (dp->dtdo_rtype.dtdt_kind == 6550 DIF_TYPE_STRING) { 6551 char c = '\0' + 1; 6552 int intuple = act->dta_intuple; 6553 size_t s; 6554 6555 for (s = 0; s < size; s++) { 6556 if (c != '\0') 6557 c = dtrace_load8(val++); 6558 6559 DTRACE_STORE(uint8_t, tomax, 6560 valoffs++, c); 6561 6562 if (c == '\0' && intuple) 6563 break; 6564 } 6565 6566 continue; 6567 } 6568 6569 while (valoffs < end) { 6570 DTRACE_STORE(uint8_t, tomax, valoffs++, 6571 dtrace_load8(val++)); 6572 } 6573 6574 continue; 6575 } 6576 6577 switch (size) { 6578 case 0: 6579 break; 6580 6581 case sizeof (uint8_t): 6582 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6583 break; 6584 case sizeof (uint16_t): 6585 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6586 break; 6587 case sizeof (uint32_t): 6588 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6589 break; 6590 case sizeof (uint64_t): 6591 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6592 break; 6593 default: 6594 /* 6595 * Any other size should have been returned by 6596 * reference, not by value. 6597 */ 6598 ASSERT(0); 6599 break; 6600 } 6601 } 6602 6603 if (*flags & CPU_DTRACE_DROP) 6604 continue; 6605 6606 if (*flags & CPU_DTRACE_FAULT) { 6607 int ndx; 6608 dtrace_action_t *err; 6609 6610 buf->dtb_errors++; 6611 6612 if (probe->dtpr_id == dtrace_probeid_error) { 6613 /* 6614 * There's nothing we can do -- we had an 6615 * error on the error probe. We bump an 6616 * error counter to at least indicate that 6617 * this condition happened. 6618 */ 6619 dtrace_error(&state->dts_dblerrors); 6620 continue; 6621 } 6622 6623 if (vtime) { 6624 /* 6625 * Before recursing on dtrace_probe(), we 6626 * need to explicitly clear out our start 6627 * time to prevent it from being accumulated 6628 * into t_dtrace_vtime. 6629 */ 6630 curthread->t_dtrace_start = 0; 6631 } 6632 6633 /* 6634 * Iterate over the actions to figure out which action 6635 * we were processing when we experienced the error. 6636 * Note that act points _past_ the faulting action; if 6637 * act is ecb->dte_action, the fault was in the 6638 * predicate, if it's ecb->dte_action->dta_next it's 6639 * in action #1, and so on. 6640 */ 6641 for (err = ecb->dte_action, ndx = 0; 6642 err != act; err = err->dta_next, ndx++) 6643 continue; 6644 6645 dtrace_probe_error(state, ecb->dte_epid, ndx, 6646 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6647 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6648 cpu_core[cpuid].cpuc_dtrace_illval); 6649 6650 continue; 6651 } 6652 6653 if (!committed) 6654 buf->dtb_offset = offs + ecb->dte_size; 6655 } 6656 6657 if (vtime) 6658 curthread->t_dtrace_start = dtrace_gethrtime(); 6659 6660 dtrace_interrupt_enable(cookie); 6661 } 6662 6663 /* 6664 * DTrace Probe Hashing Functions 6665 * 6666 * The functions in this section (and indeed, the functions in remaining 6667 * sections) are not _called_ from probe context. (Any exceptions to this are 6668 * marked with a "Note:".) Rather, they are called from elsewhere in the 6669 * DTrace framework to look-up probes in, add probes to and remove probes from 6670 * the DTrace probe hashes. (Each probe is hashed by each element of the 6671 * probe tuple -- allowing for fast lookups, regardless of what was 6672 * specified.) 
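 *
 * Three such hashes are maintained -- dtrace_bymod, dtrace_byfunc and
 * dtrace_byname -- one for each string element of the probe tuple.  A
 * caller looks up a probe by filling in the corresponding field of a
 * template probe, along the lines of:
 *
 *	dtrace_probe_t template;
 *
 *	template.dtpr_func = "uiomove";
 *	probe = dtrace_hash_lookup(dtrace_byfunc, &template);
 *
 * (dtrace_match(), below, uses exactly this technique, consulting
 * whichever of the three hashes has the fewest collisions for its
 * search; "uiomove" above is merely an illustrative function name.)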
6673 */ 6674 static uint_t 6675 dtrace_hash_str(const char *p) 6676 { 6677 unsigned int g; 6678 uint_t hval = 0; 6679 6680 while (*p) { 6681 hval = (hval << 4) + *p++; 6682 if ((g = (hval & 0xf0000000)) != 0) 6683 hval ^= g >> 24; 6684 hval &= ~g; 6685 } 6686 return (hval); 6687 } 6688 6689 static dtrace_hash_t * 6690 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6691 { 6692 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6693 6694 hash->dth_stroffs = stroffs; 6695 hash->dth_nextoffs = nextoffs; 6696 hash->dth_prevoffs = prevoffs; 6697 6698 hash->dth_size = 1; 6699 hash->dth_mask = hash->dth_size - 1; 6700 6701 hash->dth_tab = kmem_zalloc(hash->dth_size * 6702 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6703 6704 return (hash); 6705 } 6706 6707 static void 6708 dtrace_hash_destroy(dtrace_hash_t *hash) 6709 { 6710 #ifdef DEBUG 6711 int i; 6712 6713 for (i = 0; i < hash->dth_size; i++) 6714 ASSERT(hash->dth_tab[i] == NULL); 6715 #endif 6716 6717 kmem_free(hash->dth_tab, 6718 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6719 kmem_free(hash, sizeof (dtrace_hash_t)); 6720 } 6721 6722 static void 6723 dtrace_hash_resize(dtrace_hash_t *hash) 6724 { 6725 int size = hash->dth_size, i, ndx; 6726 int new_size = hash->dth_size << 1; 6727 int new_mask = new_size - 1; 6728 dtrace_hashbucket_t **new_tab, *bucket, *next; 6729 6730 ASSERT((new_size & new_mask) == 0); 6731 6732 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6733 6734 for (i = 0; i < size; i++) { 6735 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6736 dtrace_probe_t *probe = bucket->dthb_chain; 6737 6738 ASSERT(probe != NULL); 6739 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6740 6741 next = bucket->dthb_next; 6742 bucket->dthb_next = new_tab[ndx]; 6743 new_tab[ndx] = bucket; 6744 } 6745 } 6746 6747 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6748 hash->dth_tab = new_tab; 6749 hash->dth_size = new_size; 6750 hash->dth_mask = new_mask; 6751 } 6752 6753 static void 6754 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6755 { 6756 int hashval = DTRACE_HASHSTR(hash, new); 6757 int ndx = hashval & hash->dth_mask; 6758 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6759 dtrace_probe_t **nextp, **prevp; 6760 6761 for (; bucket != NULL; bucket = bucket->dthb_next) { 6762 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6763 goto add; 6764 } 6765 6766 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6767 dtrace_hash_resize(hash); 6768 dtrace_hash_add(hash, new); 6769 return; 6770 } 6771 6772 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6773 bucket->dthb_next = hash->dth_tab[ndx]; 6774 hash->dth_tab[ndx] = bucket; 6775 hash->dth_nbuckets++; 6776 6777 add: 6778 nextp = DTRACE_HASHNEXT(hash, new); 6779 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6780 *nextp = bucket->dthb_chain; 6781 6782 if (bucket->dthb_chain != NULL) { 6783 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6784 ASSERT(*prevp == NULL); 6785 *prevp = new; 6786 } 6787 6788 bucket->dthb_chain = new; 6789 bucket->dthb_len++; 6790 } 6791 6792 static dtrace_probe_t * 6793 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6794 { 6795 int hashval = DTRACE_HASHSTR(hash, template); 6796 int ndx = hashval & hash->dth_mask; 6797 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6798 6799 for (; bucket != NULL; bucket = bucket->dthb_next) { 6800 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6801 return 
(bucket->dthb_chain); 6802 } 6803 6804 return (NULL); 6805 } 6806 6807 static int 6808 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6809 { 6810 int hashval = DTRACE_HASHSTR(hash, template); 6811 int ndx = hashval & hash->dth_mask; 6812 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6813 6814 for (; bucket != NULL; bucket = bucket->dthb_next) { 6815 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6816 return (bucket->dthb_len); 6817 } 6818 6819 return (0); 6820 } 6821 6822 static void 6823 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6824 { 6825 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6826 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6827 6828 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6829 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6830 6831 /* 6832 * Find the bucket that we're removing this probe from. 6833 */ 6834 for (; bucket != NULL; bucket = bucket->dthb_next) { 6835 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6836 break; 6837 } 6838 6839 ASSERT(bucket != NULL); 6840 6841 if (*prevp == NULL) { 6842 if (*nextp == NULL) { 6843 /* 6844 * The removed probe was the only probe on this 6845 * bucket; we need to remove the bucket. 6846 */ 6847 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6848 6849 ASSERT(bucket->dthb_chain == probe); 6850 ASSERT(b != NULL); 6851 6852 if (b == bucket) { 6853 hash->dth_tab[ndx] = bucket->dthb_next; 6854 } else { 6855 while (b->dthb_next != bucket) 6856 b = b->dthb_next; 6857 b->dthb_next = bucket->dthb_next; 6858 } 6859 6860 ASSERT(hash->dth_nbuckets > 0); 6861 hash->dth_nbuckets--; 6862 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6863 return; 6864 } 6865 6866 bucket->dthb_chain = *nextp; 6867 } else { 6868 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6869 } 6870 6871 if (*nextp != NULL) 6872 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6873 } 6874 6875 /* 6876 * DTrace Utility Functions 6877 * 6878 * These are random utility functions that are _not_ called from probe context. 6879 */ 6880 static int 6881 dtrace_badattr(const dtrace_attribute_t *a) 6882 { 6883 return (a->dtat_name > DTRACE_STABILITY_MAX || 6884 a->dtat_data > DTRACE_STABILITY_MAX || 6885 a->dtat_class > DTRACE_CLASS_MAX); 6886 } 6887 6888 /* 6889 * Return a duplicate copy of a string. If the specified string is NULL, 6890 * this function returns a zero-length string. 6891 */ 6892 static char * 6893 dtrace_strdup(const char *str) 6894 { 6895 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6896 6897 if (str != NULL) 6898 (void) strcpy(new, str); 6899 6900 return (new); 6901 } 6902 6903 #define DTRACE_ISALPHA(c) \ 6904 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6905 6906 static int 6907 dtrace_badname(const char *s) 6908 { 6909 char c; 6910 6911 if (s == NULL || (c = *s++) == '\0') 6912 return (0); 6913 6914 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6915 return (1); 6916 6917 while ((c = *s++) != '\0') { 6918 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6919 c != '-' && c != '_' && c != '.' && c != '`') 6920 return (1); 6921 } 6922 6923 return (0); 6924 } 6925 6926 static void 6927 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6928 { 6929 uint32_t priv; 6930 6931 #if defined(sun) 6932 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6933 /* 6934 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
6935 */ 6936 priv = DTRACE_PRIV_ALL; 6937 } else { 6938 *uidp = crgetuid(cr); 6939 *zoneidp = crgetzoneid(cr); 6940 6941 priv = 0; 6942 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6943 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6944 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6945 priv |= DTRACE_PRIV_USER; 6946 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6947 priv |= DTRACE_PRIV_PROC; 6948 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6949 priv |= DTRACE_PRIV_OWNER; 6950 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6951 priv |= DTRACE_PRIV_ZONEOWNER; 6952 } 6953 #else 6954 priv = DTRACE_PRIV_ALL; 6955 #endif 6956 6957 *privp = priv; 6958 } 6959 6960 #ifdef DTRACE_ERRDEBUG 6961 static void 6962 dtrace_errdebug(const char *str) 6963 { 6964 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6965 int occupied = 0; 6966 6967 mutex_enter(&dtrace_errlock); 6968 dtrace_errlast = str; 6969 dtrace_errthread = curthread; 6970 6971 while (occupied++ < DTRACE_ERRHASHSZ) { 6972 if (dtrace_errhash[hval].dter_msg == str) { 6973 dtrace_errhash[hval].dter_count++; 6974 goto out; 6975 } 6976 6977 if (dtrace_errhash[hval].dter_msg != NULL) { 6978 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6979 continue; 6980 } 6981 6982 dtrace_errhash[hval].dter_msg = str; 6983 dtrace_errhash[hval].dter_count = 1; 6984 goto out; 6985 } 6986 6987 panic("dtrace: undersized error hash"); 6988 out: 6989 mutex_exit(&dtrace_errlock); 6990 } 6991 #endif 6992 6993 /* 6994 * DTrace Matching Functions 6995 * 6996 * These functions are used to match groups of probes, given some elements of 6997 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6998 */ 6999 static int 7000 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7001 zoneid_t zoneid) 7002 { 7003 if (priv != DTRACE_PRIV_ALL) { 7004 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7005 uint32_t match = priv & ppriv; 7006 7007 /* 7008 * No PRIV_DTRACE_* privileges... 7009 */ 7010 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7011 DTRACE_PRIV_KERNEL)) == 0) 7012 return (0); 7013 7014 /* 7015 * No matching bits, but there were bits to match... 7016 */ 7017 if (match == 0 && ppriv != 0) 7018 return (0); 7019 7020 /* 7021 * Need to have permissions to the process, but don't... 7022 */ 7023 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7024 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7025 return (0); 7026 } 7027 7028 /* 7029 * Need to be in the same zone unless we possess the 7030 * privilege to examine all zones. 7031 */ 7032 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7033 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7034 return (0); 7035 } 7036 } 7037 7038 return (1); 7039 } 7040 7041 /* 7042 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7043 * consists of input pattern strings and an ops-vector to evaluate them. 7044 * This function returns >0 for match, 0 for no match, and <0 for error. 
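 *
 * (The only way to obtain a negative return value is a recursion
 * overflow in dtrace_match_glob(), below.)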
7045 */ 7046 static int 7047 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7048 uint32_t priv, uid_t uid, zoneid_t zoneid) 7049 { 7050 dtrace_provider_t *pvp = prp->dtpr_provider; 7051 int rv; 7052 7053 if (pvp->dtpv_defunct) 7054 return (0); 7055 7056 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7057 return (rv); 7058 7059 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7060 return (rv); 7061 7062 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7063 return (rv); 7064 7065 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7066 return (rv); 7067 7068 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7069 return (0); 7070 7071 return (rv); 7072 } 7073 7074 /* 7075 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7076 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7077 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7078 * In addition, all of the recursion cases except for '*' matching have been 7079 * unwound. For '*', we still implement recursive evaluation, but a depth 7080 * counter is maintained and matching is aborted if we recurse too deep. 7081 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7082 */ 7083 static int 7084 dtrace_match_glob(const char *s, const char *p, int depth) 7085 { 7086 const char *olds; 7087 char s1, c; 7088 int gs; 7089 7090 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7091 return (-1); 7092 7093 if (s == NULL) 7094 s = ""; /* treat NULL as empty string */ 7095 7096 top: 7097 olds = s; 7098 s1 = *s++; 7099 7100 if (p == NULL) 7101 return (0); 7102 7103 if ((c = *p++) == '\0') 7104 return (s1 == '\0'); 7105 7106 switch (c) { 7107 case '[': { 7108 int ok = 0, notflag = 0; 7109 char lc = '\0'; 7110 7111 if (s1 == '\0') 7112 return (0); 7113 7114 if (*p == '!') { 7115 notflag = 1; 7116 p++; 7117 } 7118 7119 if ((c = *p++) == '\0') 7120 return (0); 7121 7122 do { 7123 if (c == '-' && lc != '\0' && *p != ']') { 7124 if ((c = *p++) == '\0') 7125 return (0); 7126 if (c == '\\' && (c = *p++) == '\0') 7127 return (0); 7128 7129 if (notflag) { 7130 if (s1 < lc || s1 > c) 7131 ok++; 7132 else 7133 return (0); 7134 } else if (lc <= s1 && s1 <= c) 7135 ok++; 7136 7137 } else if (c == '\\' && (c = *p++) == '\0') 7138 return (0); 7139 7140 lc = c; /* save left-hand 'c' for next iteration */ 7141 7142 if (notflag) { 7143 if (s1 != c) 7144 ok++; 7145 else 7146 return (0); 7147 } else if (s1 == c) 7148 ok++; 7149 7150 if ((c = *p++) == '\0') 7151 return (0); 7152 7153 } while (c != ']'); 7154 7155 if (ok) 7156 goto top; 7157 7158 return (0); 7159 } 7160 7161 case '\\': 7162 if ((c = *p++) == '\0') 7163 return (0); 7164 /*FALLTHRU*/ 7165 7166 default: 7167 if (c != s1) 7168 return (0); 7169 /*FALLTHRU*/ 7170 7171 case '?': 7172 if (s1 != '\0') 7173 goto top; 7174 return (0); 7175 7176 case '*': 7177 while (*p == '*') 7178 p++; /* consecutive *'s are identical to a single one */ 7179 7180 if (*p == '\0') 7181 return (1); 7182 7183 for (s = olds; *s != '\0'; s++) { 7184 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7185 return (gs); 7186 } 7187 7188 return (0); 7189 } 7190 } 7191 7192 /*ARGSUSED*/ 7193 static int 7194 dtrace_match_string(const char *s, const char *p, int depth) 7195 { 7196 return (s != NULL && strcmp(s, p) == 0); 7197 } 7198 7199 /*ARGSUSED*/ 7200 static int 7201 dtrace_match_nul(const char *s, const char *p, int depth) 7202 { 7203 return (1); /* 
always match the empty pattern */ 7204 } 7205 7206 /*ARGSUSED*/ 7207 static int 7208 dtrace_match_nonzero(const char *s, const char *p, int depth) 7209 { 7210 return (s != NULL && s[0] != '\0'); 7211 } 7212 7213 static int 7214 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7215 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7216 { 7217 dtrace_probe_t template, *probe; 7218 dtrace_hash_t *hash = NULL; 7219 int len, best = INT_MAX, nmatched = 0; 7220 dtrace_id_t i; 7221 7222 ASSERT(MUTEX_HELD(&dtrace_lock)); 7223 7224 /* 7225 * If the probe ID is specified in the key, just lookup by ID and 7226 * invoke the match callback once if a matching probe is found. 7227 */ 7228 if (pkp->dtpk_id != DTRACE_IDNONE) { 7229 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7230 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7231 (void) (*matched)(probe, arg); 7232 nmatched++; 7233 } 7234 return (nmatched); 7235 } 7236 7237 template.dtpr_mod = (char *)pkp->dtpk_mod; 7238 template.dtpr_func = (char *)pkp->dtpk_func; 7239 template.dtpr_name = (char *)pkp->dtpk_name; 7240 7241 /* 7242 * We want to find the most distinct of the module name, function 7243 * name, and name. So for each one that is not a glob pattern or 7244 * empty string, we perform a lookup in the corresponding hash and 7245 * use the hash table with the fewest collisions to do our search. 7246 */ 7247 if (pkp->dtpk_mmatch == &dtrace_match_string && 7248 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7249 best = len; 7250 hash = dtrace_bymod; 7251 } 7252 7253 if (pkp->dtpk_fmatch == &dtrace_match_string && 7254 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7255 best = len; 7256 hash = dtrace_byfunc; 7257 } 7258 7259 if (pkp->dtpk_nmatch == &dtrace_match_string && 7260 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7261 best = len; 7262 hash = dtrace_byname; 7263 } 7264 7265 /* 7266 * If we did not select a hash table, iterate over every probe and 7267 * invoke our callback for each one that matches our input probe key. 7268 */ 7269 if (hash == NULL) { 7270 for (i = 0; i < dtrace_nprobes; i++) { 7271 if ((probe = dtrace_probes[i]) == NULL || 7272 dtrace_match_probe(probe, pkp, priv, uid, 7273 zoneid) <= 0) 7274 continue; 7275 7276 nmatched++; 7277 7278 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7279 break; 7280 } 7281 7282 return (nmatched); 7283 } 7284 7285 /* 7286 * If we selected a hash table, iterate over each probe of the same key 7287 * name and invoke the callback for every probe that matches the other 7288 * attributes of our input probe key. 7289 */ 7290 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7291 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7292 7293 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7294 continue; 7295 7296 nmatched++; 7297 7298 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7299 break; 7300 } 7301 7302 return (nmatched); 7303 } 7304 7305 /* 7306 * Return the function pointer dtrace_probecmp() should use to compare the 7307 * specified pattern with a string. For NULL or empty patterns, we select 7308 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7309 * For non-empty non-glob strings, we use dtrace_match_string(). 
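 *
 * For example:
 *
 *	dtrace_probekey_func(NULL)	returns &dtrace_match_nul
 *	dtrace_probekey_func("")	returns &dtrace_match_nul
 *	dtrace_probekey_func("read")	returns &dtrace_match_string
 *	dtrace_probekey_func("read*")	returns &dtrace_match_glob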
7310 */ 7311 static dtrace_probekey_f * 7312 dtrace_probekey_func(const char *p) 7313 { 7314 char c; 7315 7316 if (p == NULL || *p == '\0') 7317 return (&dtrace_match_nul); 7318 7319 while ((c = *p++) != '\0') { 7320 if (c == '[' || c == '?' || c == '*' || c == '\\') 7321 return (&dtrace_match_glob); 7322 } 7323 7324 return (&dtrace_match_string); 7325 } 7326 7327 /* 7328 * Build a probe comparison key for use with dtrace_match_probe() from the 7329 * given probe description. By convention, a null key only matches anchored 7330 * probes: if each field is the empty string, reset dtpk_fmatch to 7331 * dtrace_match_nonzero(). 7332 */ 7333 static void 7334 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7335 { 7336 pkp->dtpk_prov = pdp->dtpd_provider; 7337 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7338 7339 pkp->dtpk_mod = pdp->dtpd_mod; 7340 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7341 7342 pkp->dtpk_func = pdp->dtpd_func; 7343 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7344 7345 pkp->dtpk_name = pdp->dtpd_name; 7346 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7347 7348 pkp->dtpk_id = pdp->dtpd_id; 7349 7350 if (pkp->dtpk_id == DTRACE_IDNONE && 7351 pkp->dtpk_pmatch == &dtrace_match_nul && 7352 pkp->dtpk_mmatch == &dtrace_match_nul && 7353 pkp->dtpk_fmatch == &dtrace_match_nul && 7354 pkp->dtpk_nmatch == &dtrace_match_nul) 7355 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7356 } 7357 7358 /* 7359 * DTrace Provider-to-Framework API Functions 7360 * 7361 * These functions implement much of the Provider-to-Framework API, as 7362 * described in <sys/dtrace.h>. The parts of the API not in this section are 7363 * the functions in the API for probe management (found below), and 7364 * dtrace_probe() itself (found above). 7365 */ 7366 7367 /* 7368 * Register the calling provider with the DTrace framework. This should 7369 * generally be called by DTrace providers in their attach(9E) entry point. 7370 */ 7371 int 7372 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7373 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7374 { 7375 dtrace_provider_t *provider; 7376 7377 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7378 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7379 "arguments", name ? 
name : "<NULL>"); 7380 return (EINVAL); 7381 } 7382 7383 if (name[0] == '\0' || dtrace_badname(name)) { 7384 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7385 "provider name", name); 7386 return (EINVAL); 7387 } 7388 7389 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7390 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7391 pops->dtps_destroy == NULL || 7392 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7393 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7394 "provider ops", name); 7395 return (EINVAL); 7396 } 7397 7398 if (dtrace_badattr(&pap->dtpa_provider) || 7399 dtrace_badattr(&pap->dtpa_mod) || 7400 dtrace_badattr(&pap->dtpa_func) || 7401 dtrace_badattr(&pap->dtpa_name) || 7402 dtrace_badattr(&pap->dtpa_args)) { 7403 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7404 "provider attributes", name); 7405 return (EINVAL); 7406 } 7407 7408 if (priv & ~DTRACE_PRIV_ALL) { 7409 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7410 "privilege attributes", name); 7411 return (EINVAL); 7412 } 7413 7414 if ((priv & DTRACE_PRIV_KERNEL) && 7415 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7416 pops->dtps_usermode == NULL) { 7417 cmn_err(CE_WARN, "failed to register provider '%s': need " 7418 "dtps_usermode() op for given privilege attributes", name); 7419 return (EINVAL); 7420 } 7421 7422 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7423 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7424 (void) strcpy(provider->dtpv_name, name); 7425 7426 provider->dtpv_attr = *pap; 7427 provider->dtpv_priv.dtpp_flags = priv; 7428 if (cr != NULL) { 7429 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7430 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7431 } 7432 provider->dtpv_pops = *pops; 7433 7434 if (pops->dtps_provide == NULL) { 7435 ASSERT(pops->dtps_provide_module != NULL); 7436 provider->dtpv_pops.dtps_provide = 7437 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7438 } 7439 7440 if (pops->dtps_provide_module == NULL) { 7441 ASSERT(pops->dtps_provide != NULL); 7442 provider->dtpv_pops.dtps_provide_module = 7443 (void (*)(void *, modctl_t *))dtrace_nullop; 7444 } 7445 7446 if (pops->dtps_suspend == NULL) { 7447 ASSERT(pops->dtps_resume == NULL); 7448 provider->dtpv_pops.dtps_suspend = 7449 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7450 provider->dtpv_pops.dtps_resume = 7451 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7452 } 7453 7454 provider->dtpv_arg = arg; 7455 *idp = (dtrace_provider_id_t)provider; 7456 7457 if (pops == &dtrace_provider_ops) { 7458 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7459 ASSERT(MUTEX_HELD(&dtrace_lock)); 7460 ASSERT(dtrace_anon.dta_enabling == NULL); 7461 7462 /* 7463 * We make sure that the DTrace provider is at the head of 7464 * the provider chain. 7465 */ 7466 provider->dtpv_next = dtrace_provider; 7467 dtrace_provider = provider; 7468 return (0); 7469 } 7470 7471 mutex_enter(&dtrace_provider_lock); 7472 mutex_enter(&dtrace_lock); 7473 7474 /* 7475 * If there is at least one provider registered, we'll add this 7476 * provider after the first provider. 
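 * (The dtrace provider itself is always kept at the head of the chain
 * -- see the registration path above -- so inserting after the first
 * element preserves that.)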
7477 */ 7478 if (dtrace_provider != NULL) { 7479 provider->dtpv_next = dtrace_provider->dtpv_next; 7480 dtrace_provider->dtpv_next = provider; 7481 } else { 7482 dtrace_provider = provider; 7483 } 7484 7485 if (dtrace_retained != NULL) { 7486 dtrace_enabling_provide(provider); 7487 7488 /* 7489 * Now we need to call dtrace_enabling_matchall() -- which 7490 * will acquire cpu_lock and dtrace_lock. We therefore need 7491 * to drop all of our locks before calling into it... 7492 */ 7493 mutex_exit(&dtrace_lock); 7494 mutex_exit(&dtrace_provider_lock); 7495 dtrace_enabling_matchall(); 7496 7497 return (0); 7498 } 7499 7500 mutex_exit(&dtrace_lock); 7501 mutex_exit(&dtrace_provider_lock); 7502 7503 return (0); 7504 } 7505 7506 /* 7507 * Unregister the specified provider from the DTrace framework. This should 7508 * generally be called by DTrace providers in their detach(9E) entry point. 7509 */ 7510 int 7511 dtrace_unregister(dtrace_provider_id_t id) 7512 { 7513 dtrace_provider_t *old = (dtrace_provider_t *)id; 7514 dtrace_provider_t *prev = NULL; 7515 int i, self = 0; 7516 dtrace_probe_t *probe, *first = NULL; 7517 7518 if (old->dtpv_pops.dtps_enable == 7519 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7520 /* 7521 * If DTrace itself is the provider, we're called with locks 7522 * already held. 7523 */ 7524 ASSERT(old == dtrace_provider); 7525 #if defined(sun) 7526 ASSERT(dtrace_devi != NULL); 7527 #endif 7528 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7529 ASSERT(MUTEX_HELD(&dtrace_lock)); 7530 self = 1; 7531 7532 if (dtrace_provider->dtpv_next != NULL) { 7533 /* 7534 * There's another provider here; return failure. 7535 */ 7536 return (EBUSY); 7537 } 7538 } else { 7539 mutex_enter(&dtrace_provider_lock); 7540 mutex_enter(&mod_lock); 7541 mutex_enter(&dtrace_lock); 7542 } 7543 7544 /* 7545 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7546 * probes, we refuse to let providers slither away, unless this 7547 * provider has already been explicitly invalidated. 7548 */ 7549 if (!old->dtpv_defunct && 7550 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7551 dtrace_anon.dta_state->dts_necbs > 0))) { 7552 if (!self) { 7553 mutex_exit(&dtrace_lock); 7554 mutex_exit(&mod_lock); 7555 mutex_exit(&dtrace_provider_lock); 7556 } 7557 return (EBUSY); 7558 } 7559 7560 /* 7561 * Attempt to destroy the probes associated with this provider. 7562 */ 7563 for (i = 0; i < dtrace_nprobes; i++) { 7564 if ((probe = dtrace_probes[i]) == NULL) 7565 continue; 7566 7567 if (probe->dtpr_provider != old) 7568 continue; 7569 7570 if (probe->dtpr_ecb == NULL) 7571 continue; 7572 7573 /* 7574 * We have at least one ECB; we can't remove this provider. 7575 */ 7576 if (!self) { 7577 mutex_exit(&dtrace_lock); 7578 mutex_exit(&mod_lock); 7579 mutex_exit(&dtrace_provider_lock); 7580 } 7581 return (EBUSY); 7582 } 7583 7584 /* 7585 * All of the probes for this provider are disabled; we can safely 7586 * remove all of them from their hash chains and from the probe array. 
7587 */ 7588 for (i = 0; i < dtrace_nprobes; i++) { 7589 if ((probe = dtrace_probes[i]) == NULL) 7590 continue; 7591 7592 if (probe->dtpr_provider != old) 7593 continue; 7594 7595 dtrace_probes[i] = NULL; 7596 7597 dtrace_hash_remove(dtrace_bymod, probe); 7598 dtrace_hash_remove(dtrace_byfunc, probe); 7599 dtrace_hash_remove(dtrace_byname, probe); 7600 7601 if (first == NULL) { 7602 first = probe; 7603 probe->dtpr_nextmod = NULL; 7604 } else { 7605 probe->dtpr_nextmod = first; 7606 first = probe; 7607 } 7608 } 7609 7610 /* 7611 * The provider's probes have been removed from the hash chains and 7612 * from the probe array. Now issue a dtrace_sync() to be sure that 7613 * everyone has cleared out from any probe array processing. 7614 */ 7615 dtrace_sync(); 7616 7617 for (probe = first; probe != NULL; probe = first) { 7618 first = probe->dtpr_nextmod; 7619 7620 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7621 probe->dtpr_arg); 7622 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7623 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7624 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7625 #if defined(sun) 7626 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7627 #else 7628 free_unr(dtrace_arena, probe->dtpr_id); 7629 #endif 7630 kmem_free(probe, sizeof (dtrace_probe_t)); 7631 } 7632 7633 if ((prev = dtrace_provider) == old) { 7634 #if defined(sun) 7635 ASSERT(self || dtrace_devi == NULL); 7636 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7637 #endif 7638 dtrace_provider = old->dtpv_next; 7639 } else { 7640 while (prev != NULL && prev->dtpv_next != old) 7641 prev = prev->dtpv_next; 7642 7643 if (prev == NULL) { 7644 panic("attempt to unregister non-existent " 7645 "dtrace provider %p\n", (void *)id); 7646 } 7647 7648 prev->dtpv_next = old->dtpv_next; 7649 } 7650 7651 if (!self) { 7652 mutex_exit(&dtrace_lock); 7653 mutex_exit(&mod_lock); 7654 mutex_exit(&dtrace_provider_lock); 7655 } 7656 7657 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7658 kmem_free(old, sizeof (dtrace_provider_t)); 7659 7660 return (0); 7661 } 7662 7663 /* 7664 * Invalidate the specified provider. All subsequent probe lookups for the 7665 * specified provider will fail, but its probes will not be removed. 7666 */ 7667 void 7668 dtrace_invalidate(dtrace_provider_id_t id) 7669 { 7670 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7671 7672 ASSERT(pvp->dtpv_pops.dtps_enable != 7673 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7674 7675 mutex_enter(&dtrace_provider_lock); 7676 mutex_enter(&dtrace_lock); 7677 7678 pvp->dtpv_defunct = 1; 7679 7680 mutex_exit(&dtrace_lock); 7681 mutex_exit(&dtrace_provider_lock); 7682 } 7683 7684 /* 7685 * Indicate whether or not DTrace has attached. 7686 */ 7687 int 7688 dtrace_attached(void) 7689 { 7690 /* 7691 * dtrace_provider will be non-NULL iff the DTrace driver has 7692 * attached. (It's non-NULL because DTrace is always itself a 7693 * provider.) 7694 */ 7695 return (dtrace_provider != NULL); 7696 } 7697 7698 /* 7699 * Remove all the unenabled probes for the given provider. This function is 7700 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7701 * -- just as many of its associated probes as it can. 7702 */ 7703 int 7704 dtrace_condense(dtrace_provider_id_t id) 7705 { 7706 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7707 int i; 7708 dtrace_probe_t *probe; 7709 7710 /* 7711 * Make sure this isn't the dtrace provider itself. 
7712 */ 7713 ASSERT(prov->dtpv_pops.dtps_enable != 7714 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7715 7716 mutex_enter(&dtrace_provider_lock); 7717 mutex_enter(&dtrace_lock); 7718 7719 /* 7720 * Attempt to destroy the probes associated with this provider. 7721 */ 7722 for (i = 0; i < dtrace_nprobes; i++) { 7723 if ((probe = dtrace_probes[i]) == NULL) 7724 continue; 7725 7726 if (probe->dtpr_provider != prov) 7727 continue; 7728 7729 if (probe->dtpr_ecb != NULL) 7730 continue; 7731 7732 dtrace_probes[i] = NULL; 7733 7734 dtrace_hash_remove(dtrace_bymod, probe); 7735 dtrace_hash_remove(dtrace_byfunc, probe); 7736 dtrace_hash_remove(dtrace_byname, probe); 7737 7738 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7739 probe->dtpr_arg); 7740 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7741 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7742 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7743 kmem_free(probe, sizeof (dtrace_probe_t)); 7744 #if defined(sun) 7745 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7746 #else 7747 free_unr(dtrace_arena, i + 1); 7748 #endif 7749 } 7750 7751 mutex_exit(&dtrace_lock); 7752 mutex_exit(&dtrace_provider_lock); 7753 7754 return (0); 7755 } 7756 7757 /* 7758 * DTrace Probe Management Functions 7759 * 7760 * The functions in this section perform the DTrace probe management, 7761 * including functions to create probes, look-up probes, and call into the 7762 * providers to request that probes be provided. Some of these functions are 7763 * in the Provider-to-Framework API; these functions can be identified by the 7764 * fact that they are not declared "static". 7765 */ 7766 7767 /* 7768 * Create a probe with the specified module name, function name, and name. 7769 */ 7770 dtrace_id_t 7771 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7772 const char *func, const char *name, int aframes, void *arg) 7773 { 7774 dtrace_probe_t *probe, **probes; 7775 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7776 dtrace_id_t id; 7777 7778 if (provider == dtrace_provider) { 7779 ASSERT(MUTEX_HELD(&dtrace_lock)); 7780 } else { 7781 mutex_enter(&dtrace_lock); 7782 } 7783 7784 #if defined(sun) 7785 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7786 VM_BESTFIT | VM_SLEEP); 7787 #else 7788 id = alloc_unr(dtrace_arena); 7789 #endif 7790 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7791 7792 probe->dtpr_id = id; 7793 probe->dtpr_gen = dtrace_probegen++; 7794 probe->dtpr_mod = dtrace_strdup(mod); 7795 probe->dtpr_func = dtrace_strdup(func); 7796 probe->dtpr_name = dtrace_strdup(name); 7797 probe->dtpr_arg = arg; 7798 probe->dtpr_aframes = aframes; 7799 probe->dtpr_provider = provider; 7800 7801 dtrace_hash_add(dtrace_bymod, probe); 7802 dtrace_hash_add(dtrace_byfunc, probe); 7803 dtrace_hash_add(dtrace_byname, probe); 7804 7805 if (id - 1 >= dtrace_nprobes) { 7806 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7807 size_t nsize = osize << 1; 7808 7809 if (nsize == 0) { 7810 ASSERT(osize == 0); 7811 ASSERT(dtrace_probes == NULL); 7812 nsize = sizeof (dtrace_probe_t *); 7813 } 7814 7815 probes = kmem_zalloc(nsize, KM_SLEEP); 7816 7817 if (dtrace_probes == NULL) { 7818 ASSERT(osize == 0); 7819 dtrace_probes = probes; 7820 dtrace_nprobes = 1; 7821 } else { 7822 dtrace_probe_t **oprobes = dtrace_probes; 7823 7824 bcopy(oprobes, probes, osize); 7825 dtrace_membar_producer(); 7826 dtrace_probes = probes; 7827 7828 dtrace_sync(); 7829 7830 /* 7831 * All CPUs are now seeing 
the new probes array; we can 7832 * safely free the old array. 7833 */ 7834 kmem_free(oprobes, osize); 7835 dtrace_nprobes <<= 1; 7836 } 7837 7838 ASSERT(id - 1 < dtrace_nprobes); 7839 } 7840 7841 ASSERT(dtrace_probes[id - 1] == NULL); 7842 dtrace_probes[id - 1] = probe; 7843 7844 if (provider != dtrace_provider) 7845 mutex_exit(&dtrace_lock); 7846 7847 return (id); 7848 } 7849 7850 static dtrace_probe_t * 7851 dtrace_probe_lookup_id(dtrace_id_t id) 7852 { 7853 ASSERT(MUTEX_HELD(&dtrace_lock)); 7854 7855 if (id == 0 || id > dtrace_nprobes) 7856 return (NULL); 7857 7858 return (dtrace_probes[id - 1]); 7859 } 7860 7861 static int 7862 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7863 { 7864 *((dtrace_id_t *)arg) = probe->dtpr_id; 7865 7866 return (DTRACE_MATCH_DONE); 7867 } 7868 7869 /* 7870 * Look up a probe based on provider and one or more of module name, function 7871 * name and probe name. 7872 */ 7873 dtrace_id_t 7874 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7875 char *func, char *name) 7876 { 7877 dtrace_probekey_t pkey; 7878 dtrace_id_t id; 7879 int match; 7880 7881 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7882 pkey.dtpk_pmatch = &dtrace_match_string; 7883 pkey.dtpk_mod = mod; 7884 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7885 pkey.dtpk_func = func; 7886 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7887 pkey.dtpk_name = name; 7888 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7889 pkey.dtpk_id = DTRACE_IDNONE; 7890 7891 mutex_enter(&dtrace_lock); 7892 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7893 dtrace_probe_lookup_match, &id); 7894 mutex_exit(&dtrace_lock); 7895 7896 ASSERT(match == 1 || match == 0); 7897 return (match ? id : 0); 7898 } 7899 7900 /* 7901 * Returns the probe argument associated with the specified probe. 7902 */ 7903 void * 7904 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7905 { 7906 dtrace_probe_t *probe; 7907 void *rval = NULL; 7908 7909 mutex_enter(&dtrace_lock); 7910 7911 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7912 probe->dtpr_provider == (dtrace_provider_t *)id) 7913 rval = probe->dtpr_arg; 7914 7915 mutex_exit(&dtrace_lock); 7916 7917 return (rval); 7918 } 7919 7920 /* 7921 * Copy a probe into a probe description. 7922 */ 7923 static void 7924 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7925 { 7926 bzero(pdp, sizeof (dtrace_probedesc_t)); 7927 pdp->dtpd_id = prp->dtpr_id; 7928 7929 (void) strncpy(pdp->dtpd_provider, 7930 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7931 7932 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7933 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7934 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7935 } 7936 7937 #if !defined(sun) 7938 static int 7939 dtrace_probe_provide_cb(linker_file_t lf, void *arg) 7940 { 7941 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 7942 7943 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 7944 7945 return (0); 7946 } 7947 #endif 7948 7949 7950 /* 7951 * Called to indicate that a probe -- or probes -- should be provided by a 7952 * specified provider. If the specified description is NULL, the provider will 7953 * be told to provide all of its probes. (This is done whenever a new 7954 * consumer comes along, or whenever a retained enabling is to be matched.)
If 7955 * the specified description is non-NULL, the provider is given the 7956 * opportunity to dynamically provide the specified probe, allowing providers 7957 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7958 * probes.) If the provider is NULL, the operations will be applied to all 7959 * providers; if the provider is non-NULL the operations will only be applied 7960 * to the specified provider. The dtrace_provider_lock must be held, and the 7961 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7962 * will need to grab the dtrace_lock when it reenters the framework through 7963 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7964 */ 7965 static void 7966 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7967 { 7968 #if defined(sun) 7969 modctl_t *ctl; 7970 #endif 7971 int all = 0; 7972 7973 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7974 7975 if (prv == NULL) { 7976 all = 1; 7977 prv = dtrace_provider; 7978 } 7979 7980 do { 7981 /* 7982 * First, call the blanket provide operation. 7983 */ 7984 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7985 7986 /* 7987 * Now call the per-module provide operation. We will grab 7988 * mod_lock to prevent the list from being modified. Note 7989 * that this also prevents the mod_busy bits from changing. 7990 * (mod_busy can only be changed with mod_lock held.) 7991 */ 7992 mutex_enter(&mod_lock); 7993 7994 #if defined(sun) 7995 ctl = &modules; 7996 do { 7997 if (ctl->mod_busy || ctl->mod_mp == NULL) 7998 continue; 7999 8000 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8001 8002 } while ((ctl = ctl->mod_next) != &modules); 8003 #else 8004 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 8005 #endif 8006 8007 mutex_exit(&mod_lock); 8008 } while (all && (prv = prv->dtpv_next) != NULL); 8009 } 8010 8011 #if defined(sun) 8012 /* 8013 * Iterate over each probe, and call the Framework-to-Provider API function 8014 * denoted by offs. 8015 */ 8016 static void 8017 dtrace_probe_foreach(uintptr_t offs) 8018 { 8019 dtrace_provider_t *prov; 8020 void (*func)(void *, dtrace_id_t, void *); 8021 dtrace_probe_t *probe; 8022 dtrace_icookie_t cookie; 8023 int i; 8024 8025 /* 8026 * We disable interrupts to walk through the probe array. This is 8027 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8028 * won't see stale data. 8029 */ 8030 cookie = dtrace_interrupt_disable(); 8031 8032 for (i = 0; i < dtrace_nprobes; i++) { 8033 if ((probe = dtrace_probes[i]) == NULL) 8034 continue; 8035 8036 if (probe->dtpr_ecb == NULL) { 8037 /* 8038 * This probe isn't enabled -- don't call the function. 8039 */ 8040 continue; 8041 } 8042 8043 prov = probe->dtpr_provider; 8044 func = *((void(**)(void *, dtrace_id_t, void *)) 8045 ((uintptr_t)&prov->dtpv_pops + offs)); 8046 8047 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8048 } 8049 8050 dtrace_interrupt_enable(cookie); 8051 } 8052 #endif 8053 8054 static int 8055 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8056 { 8057 dtrace_probekey_t pkey; 8058 uint32_t priv; 8059 uid_t uid; 8060 zoneid_t zoneid; 8061 8062 ASSERT(MUTEX_HELD(&dtrace_lock)); 8063 dtrace_ecb_create_cache = NULL; 8064 8065 if (desc == NULL) { 8066 /* 8067 * If we're passed a NULL description, we're being asked to 8068 * create an ECB with a NULL probe. 
8069 */ 8070 (void) dtrace_ecb_create_enable(NULL, enab); 8071 return (0); 8072 } 8073 8074 dtrace_probekey(desc, &pkey); 8075 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8076 &priv, &uid, &zoneid); 8077 8078 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8079 enab)); 8080 } 8081 8082 /* 8083 * DTrace Helper Provider Functions 8084 */ 8085 static void 8086 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8087 { 8088 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8089 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8090 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8091 } 8092 8093 static void 8094 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8095 const dof_provider_t *dofprov, char *strtab) 8096 { 8097 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8098 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8099 dofprov->dofpv_provattr); 8100 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8101 dofprov->dofpv_modattr); 8102 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8103 dofprov->dofpv_funcattr); 8104 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8105 dofprov->dofpv_nameattr); 8106 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8107 dofprov->dofpv_argsattr); 8108 } 8109 8110 static void 8111 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8112 { 8113 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8114 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8115 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8116 dof_provider_t *provider; 8117 dof_probe_t *probe; 8118 uint32_t *off, *enoff; 8119 uint8_t *arg; 8120 char *strtab; 8121 uint_t i, nprobes; 8122 dtrace_helper_provdesc_t dhpv; 8123 dtrace_helper_probedesc_t dhpb; 8124 dtrace_meta_t *meta = dtrace_meta_pid; 8125 dtrace_mops_t *mops = &meta->dtm_mops; 8126 void *parg; 8127 8128 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8129 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8130 provider->dofpv_strtab * dof->dofh_secsize); 8131 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8132 provider->dofpv_probes * dof->dofh_secsize); 8133 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8134 provider->dofpv_prargs * dof->dofh_secsize); 8135 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8136 provider->dofpv_proffs * dof->dofh_secsize); 8137 8138 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8139 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8140 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8141 enoff = NULL; 8142 8143 /* 8144 * See dtrace_helper_provider_validate(). 8145 */ 8146 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8147 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8148 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8149 provider->dofpv_prenoffs * dof->dofh_secsize); 8150 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8151 } 8152 8153 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8154 8155 /* 8156 * Create the provider. 8157 */ 8158 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8159 8160 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8161 return; 8162 8163 meta->dtm_count++; 8164 8165 /* 8166 * Create the probes. 
8167 */ 8168 for (i = 0; i < nprobes; i++) { 8169 probe = (dof_probe_t *)(uintptr_t)(daddr + 8170 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8171 8172 dhpb.dthpb_mod = dhp->dofhp_mod; 8173 dhpb.dthpb_func = strtab + probe->dofpr_func; 8174 dhpb.dthpb_name = strtab + probe->dofpr_name; 8175 dhpb.dthpb_base = probe->dofpr_addr; 8176 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8177 dhpb.dthpb_noffs = probe->dofpr_noffs; 8178 if (enoff != NULL) { 8179 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8180 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8181 } else { 8182 dhpb.dthpb_enoffs = NULL; 8183 dhpb.dthpb_nenoffs = 0; 8184 } 8185 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8186 dhpb.dthpb_nargc = probe->dofpr_nargc; 8187 dhpb.dthpb_xargc = probe->dofpr_xargc; 8188 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8189 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8190 8191 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8192 } 8193 } 8194 8195 static void 8196 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8197 { 8198 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8199 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8200 int i; 8201 8202 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8203 8204 for (i = 0; i < dof->dofh_secnum; i++) { 8205 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8206 dof->dofh_secoff + i * dof->dofh_secsize); 8207 8208 if (sec->dofs_type != DOF_SECT_PROVIDER) 8209 continue; 8210 8211 dtrace_helper_provide_one(dhp, sec, pid); 8212 } 8213 8214 /* 8215 * We may have just created probes, so we must now rematch against 8216 * any retained enablings. Note that this call will acquire both 8217 * cpu_lock and dtrace_lock; the fact that we are holding 8218 * dtrace_meta_lock now is what defines the ordering with respect to 8219 * these three locks. 8220 */ 8221 dtrace_enabling_matchall(); 8222 } 8223 8224 static void 8225 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8226 { 8227 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8228 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8229 dof_sec_t *str_sec; 8230 dof_provider_t *provider; 8231 char *strtab; 8232 dtrace_helper_provdesc_t dhpv; 8233 dtrace_meta_t *meta = dtrace_meta_pid; 8234 dtrace_mops_t *mops = &meta->dtm_mops; 8235 8236 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8237 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8238 provider->dofpv_strtab * dof->dofh_secsize); 8239 8240 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8241 8242 /* 8243 * Create the provider. 8244 */ 8245 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8246 8247 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8248 8249 meta->dtm_count--; 8250 } 8251 8252 static void 8253 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8254 { 8255 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8256 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8257 int i; 8258 8259 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8260 8261 for (i = 0; i < dof->dofh_secnum; i++) { 8262 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8263 dof->dofh_secoff + i * dof->dofh_secsize); 8264 8265 if (sec->dofs_type != DOF_SECT_PROVIDER) 8266 continue; 8267 8268 dtrace_helper_provider_remove_one(dhp, sec, pid); 8269 } 8270 } 8271 8272 /* 8273 * DTrace Meta Provider-to-Framework API Functions 8274 * 8275 * These functions implement the Meta Provider-to-Framework API, as described 8276 * in <sys/dtrace.h>. 
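 * A user-land meta-provider registers itself with dtrace_meta_register() and withdraws with dtrace_meta_unregister(); in practice this is the fasttrap-based provider that implements pid and USDT probes.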
8277 */ 8278 int 8279 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8280 dtrace_meta_provider_id_t *idp) 8281 { 8282 dtrace_meta_t *meta; 8283 dtrace_helpers_t *help, *next; 8284 int i; 8285 8286 *idp = DTRACE_METAPROVNONE; 8287 8288 /* 8289 * We strictly don't need the name, but we hold onto it for 8290 * debuggability. All hail error queues! 8291 */ 8292 if (name == NULL) { 8293 cmn_err(CE_WARN, "failed to register meta-provider: " 8294 "invalid name"); 8295 return (EINVAL); 8296 } 8297 8298 if (mops == NULL || 8299 mops->dtms_create_probe == NULL || 8300 mops->dtms_provide_pid == NULL || 8301 mops->dtms_remove_pid == NULL) { 8302 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8303 "invalid ops", name); 8304 return (EINVAL); 8305 } 8306 8307 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8308 meta->dtm_mops = *mops; 8309 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8310 (void) strcpy(meta->dtm_name, name); 8311 meta->dtm_arg = arg; 8312 8313 mutex_enter(&dtrace_meta_lock); 8314 mutex_enter(&dtrace_lock); 8315 8316 if (dtrace_meta_pid != NULL) { 8317 mutex_exit(&dtrace_lock); 8318 mutex_exit(&dtrace_meta_lock); 8319 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8320 "user-land meta-provider exists", name); 8321 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8322 kmem_free(meta, sizeof (dtrace_meta_t)); 8323 return (EINVAL); 8324 } 8325 8326 dtrace_meta_pid = meta; 8327 *idp = (dtrace_meta_provider_id_t)meta; 8328 8329 /* 8330 * If there are providers and probes ready to go, pass them 8331 * off to the new meta provider now. 8332 */ 8333 8334 help = dtrace_deferred_pid; 8335 dtrace_deferred_pid = NULL; 8336 8337 mutex_exit(&dtrace_lock); 8338 8339 while (help != NULL) { 8340 for (i = 0; i < help->dthps_nprovs; i++) { 8341 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8342 help->dthps_pid); 8343 } 8344 8345 next = help->dthps_next; 8346 help->dthps_next = NULL; 8347 help->dthps_prev = NULL; 8348 help->dthps_deferred = 0; 8349 help = next; 8350 } 8351 8352 mutex_exit(&dtrace_meta_lock); 8353 8354 return (0); 8355 } 8356 8357 int 8358 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8359 { 8360 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8361 8362 mutex_enter(&dtrace_meta_lock); 8363 mutex_enter(&dtrace_lock); 8364 8365 if (old == dtrace_meta_pid) { 8366 pp = &dtrace_meta_pid; 8367 } else { 8368 panic("attempt to unregister non-existent " 8369 "dtrace meta-provider %p\n", (void *)old); 8370 } 8371 8372 if (old->dtm_count != 0) { 8373 mutex_exit(&dtrace_lock); 8374 mutex_exit(&dtrace_meta_lock); 8375 return (EBUSY); 8376 } 8377 8378 *pp = NULL; 8379 8380 mutex_exit(&dtrace_lock); 8381 mutex_exit(&dtrace_meta_lock); 8382 8383 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8384 kmem_free(old, sizeof (dtrace_meta_t)); 8385 8386 return (0); 8387 } 8388 8389 8390 /* 8391 * DTrace DIF Object Functions 8392 */ 8393 static int 8394 dtrace_difo_err(uint_t pc, const char *format, ...) 8395 { 8396 if (dtrace_err_verbose) { 8397 va_list alist; 8398 8399 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8400 va_start(alist, format); 8401 (void) vuprintf(format, alist); 8402 va_end(alist); 8403 } 8404 8405 #ifdef DTRACE_ERRDEBUG 8406 dtrace_errdebug(format); 8407 #endif 8408 return (1); 8409 } 8410 8411 /* 8412 * Validate a DTrace DIF object by checking the IR instructions. The following 8413 * rules are currently enforced by dtrace_difo_validate(): 8414 * 8415 * 1.
Each instruction must have a valid opcode 8416 * 2. Each register, string, variable, or subroutine reference must be valid 8417 * 3. No instruction can modify register %r0 (must be zero) 8418 * 4. All instruction reserved bits must be set to zero 8419 * 5. The last instruction must be a "ret" instruction 8420 * 6. All branch targets must reference a valid instruction _after_ the branch 8421 */ 8422 static int 8423 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8424 cred_t *cr) 8425 { 8426 int err = 0, i; 8427 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8428 int kcheckload; 8429 uint_t pc; 8430 8431 kcheckload = cr == NULL || 8432 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8433 8434 dp->dtdo_destructive = 0; 8435 8436 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8437 dif_instr_t instr = dp->dtdo_buf[pc]; 8438 8439 uint_t r1 = DIF_INSTR_R1(instr); 8440 uint_t r2 = DIF_INSTR_R2(instr); 8441 uint_t rd = DIF_INSTR_RD(instr); 8442 uint_t rs = DIF_INSTR_RS(instr); 8443 uint_t label = DIF_INSTR_LABEL(instr); 8444 uint_t v = DIF_INSTR_VAR(instr); 8445 uint_t subr = DIF_INSTR_SUBR(instr); 8446 uint_t type = DIF_INSTR_TYPE(instr); 8447 uint_t op = DIF_INSTR_OP(instr); 8448 8449 switch (op) { 8450 case DIF_OP_OR: 8451 case DIF_OP_XOR: 8452 case DIF_OP_AND: 8453 case DIF_OP_SLL: 8454 case DIF_OP_SRL: 8455 case DIF_OP_SRA: 8456 case DIF_OP_SUB: 8457 case DIF_OP_ADD: 8458 case DIF_OP_MUL: 8459 case DIF_OP_SDIV: 8460 case DIF_OP_UDIV: 8461 case DIF_OP_SREM: 8462 case DIF_OP_UREM: 8463 case DIF_OP_COPYS: 8464 if (r1 >= nregs) 8465 err += efunc(pc, "invalid register %u\n", r1); 8466 if (r2 >= nregs) 8467 err += efunc(pc, "invalid register %u\n", r2); 8468 if (rd >= nregs) 8469 err += efunc(pc, "invalid register %u\n", rd); 8470 if (rd == 0) 8471 err += efunc(pc, "cannot write to %r0\n"); 8472 break; 8473 case DIF_OP_NOT: 8474 case DIF_OP_MOV: 8475 case DIF_OP_ALLOCS: 8476 if (r1 >= nregs) 8477 err += efunc(pc, "invalid register %u\n", r1); 8478 if (r2 != 0) 8479 err += efunc(pc, "non-zero reserved bits\n"); 8480 if (rd >= nregs) 8481 err += efunc(pc, "invalid register %u\n", rd); 8482 if (rd == 0) 8483 err += efunc(pc, "cannot write to %r0\n"); 8484 break; 8485 case DIF_OP_LDSB: 8486 case DIF_OP_LDSH: 8487 case DIF_OP_LDSW: 8488 case DIF_OP_LDUB: 8489 case DIF_OP_LDUH: 8490 case DIF_OP_LDUW: 8491 case DIF_OP_LDX: 8492 if (r1 >= nregs) 8493 err += efunc(pc, "invalid register %u\n", r1); 8494 if (r2 != 0) 8495 err += efunc(pc, "non-zero reserved bits\n"); 8496 if (rd >= nregs) 8497 err += efunc(pc, "invalid register %u\n", rd); 8498 if (rd == 0) 8499 err += efunc(pc, "cannot write to %r0\n"); 8500 if (kcheckload) 8501 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8502 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8503 break; 8504 case DIF_OP_RLDSB: 8505 case DIF_OP_RLDSH: 8506 case DIF_OP_RLDSW: 8507 case DIF_OP_RLDUB: 8508 case DIF_OP_RLDUH: 8509 case DIF_OP_RLDUW: 8510 case DIF_OP_RLDX: 8511 if (r1 >= nregs) 8512 err += efunc(pc, "invalid register %u\n", r1); 8513 if (r2 != 0) 8514 err += efunc(pc, "non-zero reserved bits\n"); 8515 if (rd >= nregs) 8516 err += efunc(pc, "invalid register %u\n", rd); 8517 if (rd == 0) 8518 err += efunc(pc, "cannot write to %r0\n"); 8519 break; 8520 case DIF_OP_ULDSB: 8521 case DIF_OP_ULDSH: 8522 case DIF_OP_ULDSW: 8523 case DIF_OP_ULDUB: 8524 case DIF_OP_ULDUH: 8525 case DIF_OP_ULDUW: 8526 case DIF_OP_ULDX: 8527 if (r1 >= nregs) 8528 err += efunc(pc, "invalid register %u\n", r1); 8529 if (r2 != 0) 8530 err += 
efunc(pc, "non-zero reserved bits\n"); 8531 if (rd >= nregs) 8532 err += efunc(pc, "invalid register %u\n", rd); 8533 if (rd == 0) 8534 err += efunc(pc, "cannot write to %r0\n"); 8535 break; 8536 case DIF_OP_STB: 8537 case DIF_OP_STH: 8538 case DIF_OP_STW: 8539 case DIF_OP_STX: 8540 if (r1 >= nregs) 8541 err += efunc(pc, "invalid register %u\n", r1); 8542 if (r2 != 0) 8543 err += efunc(pc, "non-zero reserved bits\n"); 8544 if (rd >= nregs) 8545 err += efunc(pc, "invalid register %u\n", rd); 8546 if (rd == 0) 8547 err += efunc(pc, "cannot write to 0 address\n"); 8548 break; 8549 case DIF_OP_CMP: 8550 case DIF_OP_SCMP: 8551 if (r1 >= nregs) 8552 err += efunc(pc, "invalid register %u\n", r1); 8553 if (r2 >= nregs) 8554 err += efunc(pc, "invalid register %u\n", r2); 8555 if (rd != 0) 8556 err += efunc(pc, "non-zero reserved bits\n"); 8557 break; 8558 case DIF_OP_TST: 8559 if (r1 >= nregs) 8560 err += efunc(pc, "invalid register %u\n", r1); 8561 if (r2 != 0 || rd != 0) 8562 err += efunc(pc, "non-zero reserved bits\n"); 8563 break; 8564 case DIF_OP_BA: 8565 case DIF_OP_BE: 8566 case DIF_OP_BNE: 8567 case DIF_OP_BG: 8568 case DIF_OP_BGU: 8569 case DIF_OP_BGE: 8570 case DIF_OP_BGEU: 8571 case DIF_OP_BL: 8572 case DIF_OP_BLU: 8573 case DIF_OP_BLE: 8574 case DIF_OP_BLEU: 8575 if (label >= dp->dtdo_len) { 8576 err += efunc(pc, "invalid branch target %u\n", 8577 label); 8578 } 8579 if (label <= pc) { 8580 err += efunc(pc, "backward branch to %u\n", 8581 label); 8582 } 8583 break; 8584 case DIF_OP_RET: 8585 if (r1 != 0 || r2 != 0) 8586 err += efunc(pc, "non-zero reserved bits\n"); 8587 if (rd >= nregs) 8588 err += efunc(pc, "invalid register %u\n", rd); 8589 break; 8590 case DIF_OP_NOP: 8591 case DIF_OP_POPTS: 8592 case DIF_OP_FLUSHTS: 8593 if (r1 != 0 || r2 != 0 || rd != 0) 8594 err += efunc(pc, "non-zero reserved bits\n"); 8595 break; 8596 case DIF_OP_SETX: 8597 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8598 err += efunc(pc, "invalid integer ref %u\n", 8599 DIF_INSTR_INTEGER(instr)); 8600 } 8601 if (rd >= nregs) 8602 err += efunc(pc, "invalid register %u\n", rd); 8603 if (rd == 0) 8604 err += efunc(pc, "cannot write to %r0\n"); 8605 break; 8606 case DIF_OP_SETS: 8607 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8608 err += efunc(pc, "invalid string ref %u\n", 8609 DIF_INSTR_STRING(instr)); 8610 } 8611 if (rd >= nregs) 8612 err += efunc(pc, "invalid register %u\n", rd); 8613 if (rd == 0) 8614 err += efunc(pc, "cannot write to %r0\n"); 8615 break; 8616 case DIF_OP_LDGA: 8617 case DIF_OP_LDTA: 8618 if (r1 > DIF_VAR_ARRAY_MAX) 8619 err += efunc(pc, "invalid array %u\n", r1); 8620 if (r2 >= nregs) 8621 err += efunc(pc, "invalid register %u\n", r2); 8622 if (rd >= nregs) 8623 err += efunc(pc, "invalid register %u\n", rd); 8624 if (rd == 0) 8625 err += efunc(pc, "cannot write to %r0\n"); 8626 break; 8627 case DIF_OP_LDGS: 8628 case DIF_OP_LDTS: 8629 case DIF_OP_LDLS: 8630 case DIF_OP_LDGAA: 8631 case DIF_OP_LDTAA: 8632 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8633 err += efunc(pc, "invalid variable %u\n", v); 8634 if (rd >= nregs) 8635 err += efunc(pc, "invalid register %u\n", rd); 8636 if (rd == 0) 8637 err += efunc(pc, "cannot write to %r0\n"); 8638 break; 8639 case DIF_OP_STGS: 8640 case DIF_OP_STTS: 8641 case DIF_OP_STLS: 8642 case DIF_OP_STGAA: 8643 case DIF_OP_STTAA: 8644 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8645 err += efunc(pc, "invalid variable %u\n", v); 8646 if (rs >= nregs) 8647 err += efunc(pc, "invalid register %u\n", rd); 8648 break; 8649 case 
DIF_OP_CALL: 8650 if (subr > DIF_SUBR_MAX) 8651 err += efunc(pc, "invalid subr %u\n", subr); 8652 if (rd >= nregs) 8653 err += efunc(pc, "invalid register %u\n", rd); 8654 if (rd == 0) 8655 err += efunc(pc, "cannot write to %r0\n"); 8656 8657 if (subr == DIF_SUBR_COPYOUT || 8658 subr == DIF_SUBR_COPYOUTSTR) { 8659 dp->dtdo_destructive = 1; 8660 } 8661 break; 8662 case DIF_OP_PUSHTR: 8663 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8664 err += efunc(pc, "invalid ref type %u\n", type); 8665 if (r2 >= nregs) 8666 err += efunc(pc, "invalid register %u\n", r2); 8667 if (rs >= nregs) 8668 err += efunc(pc, "invalid register %u\n", rs); 8669 break; 8670 case DIF_OP_PUSHTV: 8671 if (type != DIF_TYPE_CTF) 8672 err += efunc(pc, "invalid val type %u\n", type); 8673 if (r2 >= nregs) 8674 err += efunc(pc, "invalid register %u\n", r2); 8675 if (rs >= nregs) 8676 err += efunc(pc, "invalid register %u\n", rs); 8677 break; 8678 default: 8679 err += efunc(pc, "invalid opcode %u\n", 8680 DIF_INSTR_OP(instr)); 8681 } 8682 } 8683 8684 if (dp->dtdo_len != 0 && 8685 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8686 err += efunc(dp->dtdo_len - 1, 8687 "expected 'ret' as last DIF instruction\n"); 8688 } 8689 8690 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8691 /* 8692 * If we're not returning by reference, the size must be either 8693 * 0 or the size of one of the base types. 8694 */ 8695 switch (dp->dtdo_rtype.dtdt_size) { 8696 case 0: 8697 case sizeof (uint8_t): 8698 case sizeof (uint16_t): 8699 case sizeof (uint32_t): 8700 case sizeof (uint64_t): 8701 break; 8702 8703 default: 8704 err += efunc(dp->dtdo_len - 1, "bad return size"); 8705 } 8706 } 8707 8708 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8709 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8710 dtrace_diftype_t *vt, *et; 8711 uint_t id, ndx; 8712 8713 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8714 v->dtdv_scope != DIFV_SCOPE_THREAD && 8715 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8716 err += efunc(i, "unrecognized variable scope %d\n", 8717 v->dtdv_scope); 8718 break; 8719 } 8720 8721 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8722 v->dtdv_kind != DIFV_KIND_SCALAR) { 8723 err += efunc(i, "unrecognized variable type %d\n", 8724 v->dtdv_kind); 8725 break; 8726 } 8727 8728 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8729 err += efunc(i, "%d exceeds variable id limit\n", id); 8730 break; 8731 } 8732 8733 if (id < DIF_VAR_OTHER_UBASE) 8734 continue; 8735 8736 /* 8737 * For user-defined variables, we need to check that this 8738 * definition is identical to any previous definition that we 8739 * encountered. 
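 * (Allowing the same variable id to be redefined with a different kind, type flags, or size would let one DIFO access storage that was sized for another's definition, so such mismatches are rejected below.)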
8740 */ 8741 ndx = id - DIF_VAR_OTHER_UBASE; 8742 8743 switch (v->dtdv_scope) { 8744 case DIFV_SCOPE_GLOBAL: 8745 if (ndx < vstate->dtvs_nglobals) { 8746 dtrace_statvar_t *svar; 8747 8748 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8749 existing = &svar->dtsv_var; 8750 } 8751 8752 break; 8753 8754 case DIFV_SCOPE_THREAD: 8755 if (ndx < vstate->dtvs_ntlocals) 8756 existing = &vstate->dtvs_tlocals[ndx]; 8757 break; 8758 8759 case DIFV_SCOPE_LOCAL: 8760 if (ndx < vstate->dtvs_nlocals) { 8761 dtrace_statvar_t *svar; 8762 8763 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8764 existing = &svar->dtsv_var; 8765 } 8766 8767 break; 8768 } 8769 8770 vt = &v->dtdv_type; 8771 8772 if (vt->dtdt_flags & DIF_TF_BYREF) { 8773 if (vt->dtdt_size == 0) { 8774 err += efunc(i, "zero-sized variable\n"); 8775 break; 8776 } 8777 8778 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8779 vt->dtdt_size > dtrace_global_maxsize) { 8780 err += efunc(i, "oversized by-ref global\n"); 8781 break; 8782 } 8783 } 8784 8785 if (existing == NULL || existing->dtdv_id == 0) 8786 continue; 8787 8788 ASSERT(existing->dtdv_id == v->dtdv_id); 8789 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8790 8791 if (existing->dtdv_kind != v->dtdv_kind) 8792 err += efunc(i, "%d changed variable kind\n", id); 8793 8794 et = &existing->dtdv_type; 8795 8796 if (vt->dtdt_flags != et->dtdt_flags) { 8797 err += efunc(i, "%d changed variable type flags\n", id); 8798 break; 8799 } 8800 8801 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8802 err += efunc(i, "%d changed variable type size\n", id); 8803 break; 8804 } 8805 } 8806 8807 return (err); 8808 } 8809 8810 /* 8811 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8812 * are much more constrained than normal DIFOs. Specifically, they may 8813 * not: 8814 * 8815 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8816 * miscellaneous string routines. 8817 * 2. Access DTrace variables other than the args[] array, and the 8818 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8819 * 3. Have thread-local variables. 8820 * 4. Have dynamic variables. 8821 */ 8822 static int 8823 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8824 { 8825 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8826 int err = 0; 8827 uint_t pc; 8828 8829 for (pc = 0; pc < dp->dtdo_len; pc++) { 8830 dif_instr_t instr = dp->dtdo_buf[pc]; 8831 8832 uint_t v = DIF_INSTR_VAR(instr); 8833 uint_t subr = DIF_INSTR_SUBR(instr); 8834 uint_t op = DIF_INSTR_OP(instr); 8835 8836 switch (op) { 8837 case DIF_OP_OR: 8838 case DIF_OP_XOR: 8839 case DIF_OP_AND: 8840 case DIF_OP_SLL: 8841 case DIF_OP_SRL: 8842 case DIF_OP_SRA: 8843 case DIF_OP_SUB: 8844 case DIF_OP_ADD: 8845 case DIF_OP_MUL: 8846 case DIF_OP_SDIV: 8847 case DIF_OP_UDIV: 8848 case DIF_OP_SREM: 8849 case DIF_OP_UREM: 8850 case DIF_OP_COPYS: 8851 case DIF_OP_NOT: 8852 case DIF_OP_MOV: 8853 case DIF_OP_RLDSB: 8854 case DIF_OP_RLDSH: 8855 case DIF_OP_RLDSW: 8856 case DIF_OP_RLDUB: 8857 case DIF_OP_RLDUH: 8858 case DIF_OP_RLDUW: 8859 case DIF_OP_RLDX: 8860 case DIF_OP_ULDSB: 8861 case DIF_OP_ULDSH: 8862 case DIF_OP_ULDSW: 8863 case DIF_OP_ULDUB: 8864 case DIF_OP_ULDUH: 8865 case DIF_OP_ULDUW: 8866 case DIF_OP_ULDX: 8867 case DIF_OP_STB: 8868 case DIF_OP_STH: 8869 case DIF_OP_STW: 8870 case DIF_OP_STX: 8871 case DIF_OP_ALLOCS: 8872 case DIF_OP_CMP: 8873 case DIF_OP_SCMP: 8874 case DIF_OP_TST: 8875 case DIF_OP_BA: 8876 case DIF_OP_BE: 8877 case DIF_OP_BNE: 8878 case DIF_OP_BG: 8879 case DIF_OP_BGU: 8880 case DIF_OP_BGE: 8881 case DIF_OP_BGEU: 8882 case DIF_OP_BL: 8883 case DIF_OP_BLU: 8884 case DIF_OP_BLE: 8885 case DIF_OP_BLEU: 8886 case DIF_OP_RET: 8887 case DIF_OP_NOP: 8888 case DIF_OP_POPTS: 8889 case DIF_OP_FLUSHTS: 8890 case DIF_OP_SETX: 8891 case DIF_OP_SETS: 8892 case DIF_OP_LDGA: 8893 case DIF_OP_LDLS: 8894 case DIF_OP_STGS: 8895 case DIF_OP_STLS: 8896 case DIF_OP_PUSHTR: 8897 case DIF_OP_PUSHTV: 8898 break; 8899 8900 case DIF_OP_LDGS: 8901 if (v >= DIF_VAR_OTHER_UBASE) 8902 break; 8903 8904 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8905 break; 8906 8907 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8908 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8909 v == DIF_VAR_EXECARGS || 8910 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8911 v == DIF_VAR_UID || v == DIF_VAR_GID) 8912 break; 8913 8914 err += efunc(pc, "illegal variable %u\n", v); 8915 break; 8916 8917 case DIF_OP_LDTA: 8918 case DIF_OP_LDTS: 8919 case DIF_OP_LDGAA: 8920 case DIF_OP_LDTAA: 8921 err += efunc(pc, "illegal dynamic variable load\n"); 8922 break; 8923 8924 case DIF_OP_STTS: 8925 case DIF_OP_STGAA: 8926 case DIF_OP_STTAA: 8927 err += efunc(pc, "illegal dynamic variable store\n"); 8928 break; 8929 8930 case DIF_OP_CALL: 8931 if (subr == DIF_SUBR_ALLOCA || 8932 subr == DIF_SUBR_BCOPY || 8933 subr == DIF_SUBR_COPYIN || 8934 subr == DIF_SUBR_COPYINTO || 8935 subr == DIF_SUBR_COPYINSTR || 8936 subr == DIF_SUBR_INDEX || 8937 subr == DIF_SUBR_INET_NTOA || 8938 subr == DIF_SUBR_INET_NTOA6 || 8939 subr == DIF_SUBR_INET_NTOP || 8940 subr == DIF_SUBR_LLTOSTR || 8941 subr == DIF_SUBR_RINDEX || 8942 subr == DIF_SUBR_STRCHR || 8943 subr == DIF_SUBR_STRJOIN || 8944 subr == DIF_SUBR_STRRCHR || 8945 subr == DIF_SUBR_STRSTR || 8946 subr == DIF_SUBR_HTONS || 8947 subr == DIF_SUBR_HTONL || 8948 subr == DIF_SUBR_HTONLL || 8949 subr == DIF_SUBR_NTOHS || 8950 subr == DIF_SUBR_NTOHL || 8951 subr == DIF_SUBR_NTOHLL || 8952 subr == DIF_SUBR_MEMREF || 8953 subr == DIF_SUBR_TYPEREF) 8954 break; 8955 8956 err += efunc(pc, "invalid subr %u\n", subr); 8957 break; 8958 8959 default: 8960 err += efunc(pc, "invalid opcode %u\n", 8961 DIF_INSTR_OP(instr)); 8962 } 8963 } 8964 8965 return (err); 8966 } 8967 8968 /* 8969 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 8970 * basis; 0 if not. 8971 */ 8972 static int 8973 dtrace_difo_cacheable(dtrace_difo_t *dp) 8974 { 8975 int i; 8976 8977 if (dp == NULL) 8978 return (0); 8979 8980 for (i = 0; i < dp->dtdo_varlen; i++) { 8981 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8982 8983 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8984 continue; 8985 8986 switch (v->dtdv_id) { 8987 case DIF_VAR_CURTHREAD: 8988 case DIF_VAR_PID: 8989 case DIF_VAR_TID: 8990 case DIF_VAR_EXECARGS: 8991 case DIF_VAR_EXECNAME: 8992 case DIF_VAR_ZONENAME: 8993 break; 8994 8995 default: 8996 return (0); 8997 } 8998 } 8999 9000 /* 9001 * This DIF object may be cacheable. Now we need to look for any 9002 * array loading instructions, any memory loading instructions, or 9003 * any stores to thread-local variables. 9004 */ 9005 for (i = 0; i < dp->dtdo_len; i++) { 9006 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9007 9008 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9009 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9010 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9011 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9012 return (0); 9013 } 9014 9015 return (1); 9016 } 9017 9018 static void 9019 dtrace_difo_hold(dtrace_difo_t *dp) 9020 { 9021 int i; 9022 9023 ASSERT(MUTEX_HELD(&dtrace_lock)); 9024 9025 dp->dtdo_refcnt++; 9026 ASSERT(dp->dtdo_refcnt != 0); 9027 9028 /* 9029 * We need to check this DIF object for references to the variable 9030 * DIF_VAR_VTIMESTAMP. 9031 */ 9032 for (i = 0; i < dp->dtdo_varlen; i++) { 9033 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9034 9035 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9036 continue; 9037 9038 if (dtrace_vtime_references++ == 0) 9039 dtrace_vtime_enable(); 9040 } 9041 } 9042 9043 /* 9044 * This routine calculates the dynamic variable chunksize for a given DIF 9045 * object. The calculation is not fool-proof, and can probably be tricked by 9046 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9047 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9048 * if a dynamic variable size exceeds the chunksize. 
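 * Roughly: each dynamic-variable store needs room for the dtrace_dynvar_t header (which embeds the first dtrace_key_t), one additional dtrace_key_t per extra tuple key, the key data rounded up to 8-byte units, and the stored value itself; the largest such total, rounded up to 8 bytes, becomes the chunksize.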
9049 */ 9050 static void 9051 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9052 { 9053 uint64_t sval = 0; 9054 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9055 const dif_instr_t *text = dp->dtdo_buf; 9056 uint_t pc, srd = 0; 9057 uint_t ttop = 0; 9058 size_t size, ksize; 9059 uint_t id, i; 9060 9061 for (pc = 0; pc < dp->dtdo_len; pc++) { 9062 dif_instr_t instr = text[pc]; 9063 uint_t op = DIF_INSTR_OP(instr); 9064 uint_t rd = DIF_INSTR_RD(instr); 9065 uint_t r1 = DIF_INSTR_R1(instr); 9066 uint_t nkeys = 0; 9067 uchar_t scope = 0; 9068 9069 dtrace_key_t *key = tupregs; 9070 9071 switch (op) { 9072 case DIF_OP_SETX: 9073 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9074 srd = rd; 9075 continue; 9076 9077 case DIF_OP_STTS: 9078 key = &tupregs[DIF_DTR_NREGS]; 9079 key[0].dttk_size = 0; 9080 key[1].dttk_size = 0; 9081 nkeys = 2; 9082 scope = DIFV_SCOPE_THREAD; 9083 break; 9084 9085 case DIF_OP_STGAA: 9086 case DIF_OP_STTAA: 9087 nkeys = ttop; 9088 9089 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9090 key[nkeys++].dttk_size = 0; 9091 9092 key[nkeys++].dttk_size = 0; 9093 9094 if (op == DIF_OP_STTAA) { 9095 scope = DIFV_SCOPE_THREAD; 9096 } else { 9097 scope = DIFV_SCOPE_GLOBAL; 9098 } 9099 9100 break; 9101 9102 case DIF_OP_PUSHTR: 9103 if (ttop == DIF_DTR_NREGS) 9104 return; 9105 9106 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9107 /* 9108 * If the register for the size of the "pushtr" 9109 * is %r0 (or the value is 0) and the type is 9110 * a string, we'll use the system-wide default 9111 * string size. 9112 */ 9113 tupregs[ttop++].dttk_size = 9114 dtrace_strsize_default; 9115 } else { 9116 if (srd == 0) 9117 return; 9118 9119 tupregs[ttop++].dttk_size = sval; 9120 } 9121 9122 break; 9123 9124 case DIF_OP_PUSHTV: 9125 if (ttop == DIF_DTR_NREGS) 9126 return; 9127 9128 tupregs[ttop++].dttk_size = 0; 9129 break; 9130 9131 case DIF_OP_FLUSHTS: 9132 ttop = 0; 9133 break; 9134 9135 case DIF_OP_POPTS: 9136 if (ttop != 0) 9137 ttop--; 9138 break; 9139 } 9140 9141 sval = 0; 9142 srd = 0; 9143 9144 if (nkeys == 0) 9145 continue; 9146 9147 /* 9148 * We have a dynamic variable allocation; calculate its size. 9149 */ 9150 for (ksize = 0, i = 0; i < nkeys; i++) 9151 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9152 9153 size = sizeof (dtrace_dynvar_t); 9154 size += sizeof (dtrace_key_t) * (nkeys - 1); 9155 size += ksize; 9156 9157 /* 9158 * Now we need to determine the size of the stored data. 9159 */ 9160 id = DIF_INSTR_VAR(instr); 9161 9162 for (i = 0; i < dp->dtdo_varlen; i++) { 9163 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9164 9165 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9166 size += v->dtdv_type.dtdt_size; 9167 break; 9168 } 9169 } 9170 9171 if (i == dp->dtdo_varlen) 9172 return; 9173 9174 /* 9175 * We have the size. If this is larger than the chunk size 9176 * for our dynamic variable state, reset the chunk size. 
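 * (The chunksize is only ever grown here; it is never shrunk to fit a smaller variable.)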
9177 */ 9178 size = P2ROUNDUP(size, sizeof (uint64_t)); 9179 9180 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9181 vstate->dtvs_dynvars.dtds_chunksize = size; 9182 } 9183 } 9184 9185 static void 9186 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9187 { 9188 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9189 uint_t id; 9190 9191 ASSERT(MUTEX_HELD(&dtrace_lock)); 9192 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9193 9194 for (i = 0; i < dp->dtdo_varlen; i++) { 9195 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9196 dtrace_statvar_t *svar, ***svarp = NULL; 9197 size_t dsize = 0; 9198 uint8_t scope = v->dtdv_scope; 9199 int *np = NULL; 9200 9201 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9202 continue; 9203 9204 id -= DIF_VAR_OTHER_UBASE; 9205 9206 switch (scope) { 9207 case DIFV_SCOPE_THREAD: 9208 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9209 dtrace_difv_t *tlocals; 9210 9211 if ((ntlocals = (otlocals << 1)) == 0) 9212 ntlocals = 1; 9213 9214 osz = otlocals * sizeof (dtrace_difv_t); 9215 nsz = ntlocals * sizeof (dtrace_difv_t); 9216 9217 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9218 9219 if (osz != 0) { 9220 bcopy(vstate->dtvs_tlocals, 9221 tlocals, osz); 9222 kmem_free(vstate->dtvs_tlocals, osz); 9223 } 9224 9225 vstate->dtvs_tlocals = tlocals; 9226 vstate->dtvs_ntlocals = ntlocals; 9227 } 9228 9229 vstate->dtvs_tlocals[id] = *v; 9230 continue; 9231 9232 case DIFV_SCOPE_LOCAL: 9233 np = &vstate->dtvs_nlocals; 9234 svarp = &vstate->dtvs_locals; 9235 9236 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9237 dsize = NCPU * (v->dtdv_type.dtdt_size + 9238 sizeof (uint64_t)); 9239 else 9240 dsize = NCPU * sizeof (uint64_t); 9241 9242 break; 9243 9244 case DIFV_SCOPE_GLOBAL: 9245 np = &vstate->dtvs_nglobals; 9246 svarp = &vstate->dtvs_globals; 9247 9248 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9249 dsize = v->dtdv_type.dtdt_size + 9250 sizeof (uint64_t); 9251 9252 break; 9253 9254 default: 9255 ASSERT(0); 9256 } 9257 9258 while (id >= (oldsvars = *np)) { 9259 dtrace_statvar_t **statics; 9260 int newsvars, oldsize, newsize; 9261 9262 if ((newsvars = (oldsvars << 1)) == 0) 9263 newsvars = 1; 9264 9265 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9266 newsize = newsvars * sizeof (dtrace_statvar_t *); 9267 9268 statics = kmem_zalloc(newsize, KM_SLEEP); 9269 9270 if (oldsize != 0) { 9271 bcopy(*svarp, statics, oldsize); 9272 kmem_free(*svarp, oldsize); 9273 } 9274 9275 *svarp = statics; 9276 *np = newsvars; 9277 } 9278 9279 if ((svar = (*svarp)[id]) == NULL) { 9280 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9281 svar->dtsv_var = *v; 9282 9283 if ((svar->dtsv_size = dsize) != 0) { 9284 svar->dtsv_data = (uint64_t)(uintptr_t) 9285 kmem_zalloc(dsize, KM_SLEEP); 9286 } 9287 9288 (*svarp)[id] = svar; 9289 } 9290 9291 svar->dtsv_refcnt++; 9292 } 9293 9294 dtrace_difo_chunksize(dp, vstate); 9295 dtrace_difo_hold(dp); 9296 } 9297 9298 static dtrace_difo_t * 9299 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9300 { 9301 dtrace_difo_t *new; 9302 size_t sz; 9303 9304 ASSERT(dp->dtdo_buf != NULL); 9305 ASSERT(dp->dtdo_refcnt != 0); 9306 9307 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9308 9309 ASSERT(dp->dtdo_buf != NULL); 9310 sz = dp->dtdo_len * sizeof (dif_instr_t); 9311 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9312 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9313 new->dtdo_len = dp->dtdo_len; 9314 9315 if (dp->dtdo_strtab != NULL) { 9316 ASSERT(dp->dtdo_strlen != 0); 9317 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9318 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9319 new->dtdo_strlen = dp->dtdo_strlen; 9320 } 9321 9322 if (dp->dtdo_inttab != NULL) { 9323 ASSERT(dp->dtdo_intlen != 0); 9324 sz = dp->dtdo_intlen * sizeof (uint64_t); 9325 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9326 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9327 new->dtdo_intlen = dp->dtdo_intlen; 9328 } 9329 9330 if (dp->dtdo_vartab != NULL) { 9331 ASSERT(dp->dtdo_varlen != 0); 9332 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9333 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9334 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9335 new->dtdo_varlen = dp->dtdo_varlen; 9336 } 9337 9338 dtrace_difo_init(new, vstate); 9339 return (new); 9340 } 9341 9342 static void 9343 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9344 { 9345 int i; 9346 9347 ASSERT(dp->dtdo_refcnt == 0); 9348 9349 for (i = 0; i < dp->dtdo_varlen; i++) { 9350 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9351 dtrace_statvar_t *svar, **svarp = NULL; 9352 uint_t id; 9353 uint8_t scope = v->dtdv_scope; 9354 int *np = NULL; 9355 9356 switch (scope) { 9357 case DIFV_SCOPE_THREAD: 9358 continue; 9359 9360 case DIFV_SCOPE_LOCAL: 9361 np = &vstate->dtvs_nlocals; 9362 svarp = vstate->dtvs_locals; 9363 break; 9364 9365 case DIFV_SCOPE_GLOBAL: 9366 np = &vstate->dtvs_nglobals; 9367 svarp = vstate->dtvs_globals; 9368 break; 9369 9370 default: 9371 ASSERT(0); 9372 } 9373 9374 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9375 continue; 9376 9377 id -= DIF_VAR_OTHER_UBASE; 9378 ASSERT(id < *np); 9379 9380 svar = svarp[id]; 9381 ASSERT(svar != NULL); 9382 ASSERT(svar->dtsv_refcnt > 0); 9383 9384 if (--svar->dtsv_refcnt > 0) 9385 continue; 9386 9387 if (svar->dtsv_size != 0) { 9388 ASSERT(svar->dtsv_data != 0); 9389 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9390 svar->dtsv_size); 9391 } 9392 9393 kmem_free(svar, sizeof (dtrace_statvar_t)); 9394 svarp[id] = NULL; 9395 } 9396 9397 if (dp->dtdo_buf != NULL) 9398 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9399 if (dp->dtdo_inttab != NULL) 9400 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9401 if (dp->dtdo_strtab != NULL) 9402 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9403 if (dp->dtdo_vartab != NULL) 9404 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9405 9406 kmem_free(dp, sizeof (dtrace_difo_t)); 9407 } 9408 9409 static void 9410 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9411 { 9412 int i; 9413 9414 ASSERT(MUTEX_HELD(&dtrace_lock)); 9415 ASSERT(dp->dtdo_refcnt != 0); 9416 9417 for (i = 0; i < dp->dtdo_varlen; i++) { 9418 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9419 9420 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9421 continue; 9422 9423 ASSERT(dtrace_vtime_references > 0); 9424 if (--dtrace_vtime_references == 0) 9425 dtrace_vtime_disable(); 9426 } 9427 9428 if (--dp->dtdo_refcnt == 0) 9429 dtrace_difo_destroy(dp, vstate); 9430 } 9431 9432 /* 9433 * DTrace Format Functions 9434 */ 9435 static uint16_t 9436 dtrace_format_add(dtrace_state_t *state, char *str) 9437 { 9438 char *fmt, **new; 9439 uint16_t ndx, len = strlen(str) + 1; 9440 9441 fmt = kmem_zalloc(len, KM_SLEEP); 9442 bcopy(str, fmt, len); 9443 9444 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9445 if (state->dts_formats[ndx] == NULL) { 9446 state->dts_formats[ndx] = fmt; 9447 return (ndx + 1); 9448 } 9449 } 9450 9451 if (state->dts_nformats == USHRT_MAX) { 9452 /* 9453 * This is only likely if a denial-of-service attack is being 9454 * attempted. 
As such, it's okay to fail silently here. 9455 */ 9456 kmem_free(fmt, len); 9457 return (0); 9458 } 9459 9460 /* 9461 * For simplicity, we always resize the formats array to be exactly the 9462 * number of formats. 9463 */ 9464 ndx = state->dts_nformats++; 9465 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9466 9467 if (state->dts_formats != NULL) { 9468 ASSERT(ndx != 0); 9469 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9470 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9471 } 9472 9473 state->dts_formats = new; 9474 state->dts_formats[ndx] = fmt; 9475 9476 return (ndx + 1); 9477 } 9478 9479 static void 9480 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9481 { 9482 char *fmt; 9483 9484 ASSERT(state->dts_formats != NULL); 9485 ASSERT(format <= state->dts_nformats); 9486 ASSERT(state->dts_formats[format - 1] != NULL); 9487 9488 fmt = state->dts_formats[format - 1]; 9489 kmem_free(fmt, strlen(fmt) + 1); 9490 state->dts_formats[format - 1] = NULL; 9491 } 9492 9493 static void 9494 dtrace_format_destroy(dtrace_state_t *state) 9495 { 9496 int i; 9497 9498 if (state->dts_nformats == 0) { 9499 ASSERT(state->dts_formats == NULL); 9500 return; 9501 } 9502 9503 ASSERT(state->dts_formats != NULL); 9504 9505 for (i = 0; i < state->dts_nformats; i++) { 9506 char *fmt = state->dts_formats[i]; 9507 9508 if (fmt == NULL) 9509 continue; 9510 9511 kmem_free(fmt, strlen(fmt) + 1); 9512 } 9513 9514 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9515 state->dts_nformats = 0; 9516 state->dts_formats = NULL; 9517 } 9518 9519 /* 9520 * DTrace Predicate Functions 9521 */ 9522 static dtrace_predicate_t * 9523 dtrace_predicate_create(dtrace_difo_t *dp) 9524 { 9525 dtrace_predicate_t *pred; 9526 9527 ASSERT(MUTEX_HELD(&dtrace_lock)); 9528 ASSERT(dp->dtdo_refcnt != 0); 9529 9530 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9531 pred->dtp_difo = dp; 9532 pred->dtp_refcnt = 1; 9533 9534 if (!dtrace_difo_cacheable(dp)) 9535 return (pred); 9536 9537 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9538 /* 9539 * This is only theoretically possible -- we have had 2^32 9540 * cacheable predicates on this machine. We cannot allow any 9541 * more predicates to become cacheable: as unlikely as it is, 9542 * there may be a thread caching a (now stale) predicate cache 9543 * ID. 
(N.B.: the temptation is being successfully resisted to 9544 * have this cmn_err() "Holy shit -- we executed this code!") 9545 */ 9546 return (pred); 9547 } 9548 9549 pred->dtp_cacheid = dtrace_predcache_id++; 9550 9551 return (pred); 9552 } 9553 9554 static void 9555 dtrace_predicate_hold(dtrace_predicate_t *pred) 9556 { 9557 ASSERT(MUTEX_HELD(&dtrace_lock)); 9558 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9559 ASSERT(pred->dtp_refcnt > 0); 9560 9561 pred->dtp_refcnt++; 9562 } 9563 9564 static void 9565 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9566 { 9567 dtrace_difo_t *dp = pred->dtp_difo; 9568 9569 ASSERT(MUTEX_HELD(&dtrace_lock)); 9570 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9571 ASSERT(pred->dtp_refcnt > 0); 9572 9573 if (--pred->dtp_refcnt == 0) { 9574 dtrace_difo_release(pred->dtp_difo, vstate); 9575 kmem_free(pred, sizeof (dtrace_predicate_t)); 9576 } 9577 } 9578 9579 /* 9580 * DTrace Action Description Functions 9581 */ 9582 static dtrace_actdesc_t * 9583 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9584 uint64_t uarg, uint64_t arg) 9585 { 9586 dtrace_actdesc_t *act; 9587 9588 #if defined(sun) 9589 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9590 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9591 #endif 9592 9593 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9594 act->dtad_kind = kind; 9595 act->dtad_ntuple = ntuple; 9596 act->dtad_uarg = uarg; 9597 act->dtad_arg = arg; 9598 act->dtad_refcnt = 1; 9599 9600 return (act); 9601 } 9602 9603 static void 9604 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9605 { 9606 ASSERT(act->dtad_refcnt >= 1); 9607 act->dtad_refcnt++; 9608 } 9609 9610 static void 9611 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9612 { 9613 dtrace_actkind_t kind = act->dtad_kind; 9614 dtrace_difo_t *dp; 9615 9616 ASSERT(act->dtad_refcnt >= 1); 9617 9618 if (--act->dtad_refcnt != 0) 9619 return; 9620 9621 if ((dp = act->dtad_difo) != NULL) 9622 dtrace_difo_release(dp, vstate); 9623 9624 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9625 char *str = (char *)(uintptr_t)act->dtad_arg; 9626 9627 #if defined(sun) 9628 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9629 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9630 #endif 9631 9632 if (str != NULL) 9633 kmem_free(str, strlen(str) + 1); 9634 } 9635 9636 kmem_free(act, sizeof (dtrace_actdesc_t)); 9637 } 9638 9639 /* 9640 * DTrace ECB Functions 9641 */ 9642 static dtrace_ecb_t * 9643 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9644 { 9645 dtrace_ecb_t *ecb; 9646 dtrace_epid_t epid; 9647 9648 ASSERT(MUTEX_HELD(&dtrace_lock)); 9649 9650 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9651 ecb->dte_predicate = NULL; 9652 ecb->dte_probe = probe; 9653 9654 /* 9655 * The default size is the size of the default action: recording 9656 * the epid. 
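 * Every record subsequently committed to the principal buffer on behalf of this ECB begins with that dtrace_epid_t, which is how consumers map raw buffer data back to the enabling that produced it.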
9657 */ 9658 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9659 ecb->dte_alignment = sizeof (dtrace_epid_t); 9660 9661 epid = state->dts_epid++; 9662 9663 if (epid - 1 >= state->dts_necbs) { 9664 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9665 int necbs = state->dts_necbs << 1; 9666 9667 ASSERT(epid == state->dts_necbs + 1); 9668 9669 if (necbs == 0) { 9670 ASSERT(oecbs == NULL); 9671 necbs = 1; 9672 } 9673 9674 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9675 9676 if (oecbs != NULL) 9677 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9678 9679 dtrace_membar_producer(); 9680 state->dts_ecbs = ecbs; 9681 9682 if (oecbs != NULL) { 9683 /* 9684 * If this state is active, we must dtrace_sync() 9685 * before we can free the old dts_ecbs array: we're 9686 * coming in hot, and there may be active ring 9687 * buffer processing (which indexes into the dts_ecbs 9688 * array) on another CPU. 9689 */ 9690 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9691 dtrace_sync(); 9692 9693 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9694 } 9695 9696 dtrace_membar_producer(); 9697 state->dts_necbs = necbs; 9698 } 9699 9700 ecb->dte_state = state; 9701 9702 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9703 dtrace_membar_producer(); 9704 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9705 9706 return (ecb); 9707 } 9708 9709 static void 9710 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9711 { 9712 dtrace_probe_t *probe = ecb->dte_probe; 9713 9714 ASSERT(MUTEX_HELD(&cpu_lock)); 9715 ASSERT(MUTEX_HELD(&dtrace_lock)); 9716 ASSERT(ecb->dte_next == NULL); 9717 9718 if (probe == NULL) { 9719 /* 9720 * This is the NULL probe -- there's nothing to do. 9721 */ 9722 return; 9723 } 9724 9725 if (probe->dtpr_ecb == NULL) { 9726 dtrace_provider_t *prov = probe->dtpr_provider; 9727 9728 /* 9729 * We're the first ECB on this probe. 9730 */ 9731 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9732 9733 if (ecb->dte_predicate != NULL) 9734 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9735 9736 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9737 probe->dtpr_id, probe->dtpr_arg); 9738 } else { 9739 /* 9740 * This probe is already active. Swing the last pointer to 9741 * point to the new ECB, and issue a dtrace_sync() to assure 9742 * that all CPUs have seen the change. 9743 */ 9744 ASSERT(probe->dtpr_ecb_last != NULL); 9745 probe->dtpr_ecb_last->dte_next = ecb; 9746 probe->dtpr_ecb_last = ecb; 9747 probe->dtpr_predcache = 0; 9748 9749 dtrace_sync(); 9750 } 9751 } 9752 9753 static void 9754 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9755 { 9756 uint32_t maxalign = sizeof (dtrace_epid_t); 9757 uint32_t align = sizeof (uint8_t), offs, diff; 9758 dtrace_action_t *act; 9759 int wastuple = 0; 9760 uint32_t aggbase = UINT32_MAX; 9761 dtrace_state_t *state = ecb->dte_state; 9762 9763 /* 9764 * If we record anything, we always record the epid. (And we always 9765 * record it first.) 9766 */ 9767 offs = sizeof (dtrace_epid_t); 9768 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9769 9770 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9771 dtrace_recdesc_t *rec = &act->dta_rec; 9772 9773 if ((align = rec->dtrd_alignment) > maxalign) 9774 maxalign = align; 9775 9776 if (!wastuple && act->dta_intuple) { 9777 /* 9778 * This is the first record in a tuple. Align the 9779 * offset to be at offset 4 in an 8-byte aligned 9780 * block. 
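 * Putting the first tuple record at byte 4 of an 8-byte aligned block leaves exactly enough room for the 4-byte aggregation ID immediately before it, so aggbase itself ends up 8-byte aligned (as asserted below).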
9781 */ 9782 diff = offs + sizeof (dtrace_aggid_t); 9783 9784 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9785 offs += sizeof (uint64_t) - diff; 9786 9787 aggbase = offs - sizeof (dtrace_aggid_t); 9788 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9789 } 9790 9791 /*LINTED*/ 9792 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9793 /* 9794 * The current offset is not properly aligned; align it. 9795 */ 9796 offs += align - diff; 9797 } 9798 9799 rec->dtrd_offset = offs; 9800 9801 if (offs + rec->dtrd_size > ecb->dte_needed) { 9802 ecb->dte_needed = offs + rec->dtrd_size; 9803 9804 if (ecb->dte_needed > state->dts_needed) 9805 state->dts_needed = ecb->dte_needed; 9806 } 9807 9808 if (DTRACEACT_ISAGG(act->dta_kind)) { 9809 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9810 dtrace_action_t *first = agg->dtag_first, *prev; 9811 9812 ASSERT(rec->dtrd_size != 0 && first != NULL); 9813 ASSERT(wastuple); 9814 ASSERT(aggbase != UINT32_MAX); 9815 9816 agg->dtag_base = aggbase; 9817 9818 while ((prev = first->dta_prev) != NULL && 9819 DTRACEACT_ISAGG(prev->dta_kind)) { 9820 agg = (dtrace_aggregation_t *)prev; 9821 first = agg->dtag_first; 9822 } 9823 9824 if (prev != NULL) { 9825 offs = prev->dta_rec.dtrd_offset + 9826 prev->dta_rec.dtrd_size; 9827 } else { 9828 offs = sizeof (dtrace_epid_t); 9829 } 9830 wastuple = 0; 9831 } else { 9832 if (!act->dta_intuple) 9833 ecb->dte_size = offs + rec->dtrd_size; 9834 9835 offs += rec->dtrd_size; 9836 } 9837 9838 wastuple = act->dta_intuple; 9839 } 9840 9841 if ((act = ecb->dte_action) != NULL && 9842 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9843 ecb->dte_size == sizeof (dtrace_epid_t)) { 9844 /* 9845 * If the size is still sizeof (dtrace_epid_t), then all 9846 * actions store no data; set the size to 0. 9847 */ 9848 ecb->dte_alignment = maxalign; 9849 ecb->dte_size = 0; 9850 9851 /* 9852 * If the needed space is still sizeof (dtrace_epid_t), then 9853 * all actions need no additional space; set the needed 9854 * size to 0. 9855 */ 9856 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9857 ecb->dte_needed = 0; 9858 9859 return; 9860 } 9861 9862 /* 9863 * Set our alignment, and make sure that the dte_size and dte_needed 9864 * are aligned to the size of an EPID. 
9865 */ 9866 ecb->dte_alignment = maxalign; 9867 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9868 ~(sizeof (dtrace_epid_t) - 1); 9869 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9870 ~(sizeof (dtrace_epid_t) - 1); 9871 ASSERT(ecb->dte_size <= ecb->dte_needed); 9872 } 9873 9874 static dtrace_action_t * 9875 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9876 { 9877 dtrace_aggregation_t *agg; 9878 size_t size = sizeof (uint64_t); 9879 int ntuple = desc->dtad_ntuple; 9880 dtrace_action_t *act; 9881 dtrace_recdesc_t *frec; 9882 dtrace_aggid_t aggid; 9883 dtrace_state_t *state = ecb->dte_state; 9884 9885 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9886 agg->dtag_ecb = ecb; 9887 9888 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9889 9890 switch (desc->dtad_kind) { 9891 case DTRACEAGG_MIN: 9892 agg->dtag_initial = INT64_MAX; 9893 agg->dtag_aggregate = dtrace_aggregate_min; 9894 break; 9895 9896 case DTRACEAGG_MAX: 9897 agg->dtag_initial = INT64_MIN; 9898 agg->dtag_aggregate = dtrace_aggregate_max; 9899 break; 9900 9901 case DTRACEAGG_COUNT: 9902 agg->dtag_aggregate = dtrace_aggregate_count; 9903 break; 9904 9905 case DTRACEAGG_QUANTIZE: 9906 agg->dtag_aggregate = dtrace_aggregate_quantize; 9907 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9908 sizeof (uint64_t); 9909 break; 9910 9911 case DTRACEAGG_LQUANTIZE: { 9912 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9913 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9914 9915 agg->dtag_initial = desc->dtad_arg; 9916 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9917 9918 if (step == 0 || levels == 0) 9919 goto err; 9920 9921 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9922 break; 9923 } 9924 9925 case DTRACEAGG_LLQUANTIZE: { 9926 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 9927 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 9928 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 9929 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 9930 int64_t v; 9931 9932 agg->dtag_initial = desc->dtad_arg; 9933 agg->dtag_aggregate = dtrace_aggregate_llquantize; 9934 9935 if (factor < 2 || low >= high || nsteps < factor) 9936 goto err; 9937 9938 /* 9939 * Now check that the number of steps evenly divides a power 9940 * of the factor. (This assures both integer bucket size and 9941 * linearity within each magnitude.) 9942 */ 9943 for (v = factor; v < nsteps; v *= factor) 9944 continue; 9945 9946 if ((v % nsteps) || (nsteps % factor)) 9947 goto err; 9948 9949 size = (dtrace_aggregate_llquantize_bucket(factor, 9950 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 9951 break; 9952 } 9953 9954 case DTRACEAGG_AVG: 9955 agg->dtag_aggregate = dtrace_aggregate_avg; 9956 size = sizeof (uint64_t) * 2; 9957 break; 9958 9959 case DTRACEAGG_STDDEV: 9960 agg->dtag_aggregate = dtrace_aggregate_stddev; 9961 size = sizeof (uint64_t) * 4; 9962 break; 9963 9964 case DTRACEAGG_SUM: 9965 agg->dtag_aggregate = dtrace_aggregate_sum; 9966 break; 9967 9968 default: 9969 goto err; 9970 } 9971 9972 agg->dtag_action.dta_rec.dtrd_size = size; 9973 9974 if (ntuple == 0) 9975 goto err; 9976 9977 /* 9978 * We must make sure that we have enough actions for the n-tuple. 9979 */ 9980 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9981 if (DTRACEACT_ISAGG(act->dta_kind)) 9982 break; 9983 9984 if (--ntuple == 0) { 9985 /* 9986 * This is the action with which our n-tuple begins. 
9987 */ 9988 agg->dtag_first = act; 9989 goto success; 9990 } 9991 } 9992 9993 /* 9994 * This n-tuple is short by ntuple elements. Return failure. 9995 */ 9996 ASSERT(ntuple != 0); 9997 err: 9998 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9999 return (NULL); 10000 10001 success: 10002 /* 10003 * If the last action in the tuple has a size of zero, it's actually 10004 * an expression argument for the aggregating action. 10005 */ 10006 ASSERT(ecb->dte_action_last != NULL); 10007 act = ecb->dte_action_last; 10008 10009 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10010 ASSERT(act->dta_difo != NULL); 10011 10012 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10013 agg->dtag_hasarg = 1; 10014 } 10015 10016 /* 10017 * We need to allocate an id for this aggregation. 10018 */ 10019 #if defined(sun) 10020 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10021 VM_BESTFIT | VM_SLEEP); 10022 #else 10023 aggid = alloc_unr(state->dts_aggid_arena); 10024 #endif 10025 10026 if (aggid - 1 >= state->dts_naggregations) { 10027 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10028 dtrace_aggregation_t **aggs; 10029 int naggs = state->dts_naggregations << 1; 10030 int onaggs = state->dts_naggregations; 10031 10032 ASSERT(aggid == state->dts_naggregations + 1); 10033 10034 if (naggs == 0) { 10035 ASSERT(oaggs == NULL); 10036 naggs = 1; 10037 } 10038 10039 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10040 10041 if (oaggs != NULL) { 10042 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10043 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10044 } 10045 10046 state->dts_aggregations = aggs; 10047 state->dts_naggregations = naggs; 10048 } 10049 10050 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10051 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10052 10053 frec = &agg->dtag_first->dta_rec; 10054 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10055 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10056 10057 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10058 ASSERT(!act->dta_intuple); 10059 act->dta_intuple = 1; 10060 } 10061 10062 return (&agg->dtag_action); 10063 } 10064 10065 static void 10066 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10067 { 10068 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10069 dtrace_state_t *state = ecb->dte_state; 10070 dtrace_aggid_t aggid = agg->dtag_id; 10071 10072 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10073 #if defined(sun) 10074 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10075 #else 10076 free_unr(state->dts_aggid_arena, aggid); 10077 #endif 10078 10079 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10080 state->dts_aggregations[aggid - 1] = NULL; 10081 10082 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10083 } 10084 10085 static int 10086 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10087 { 10088 dtrace_action_t *action, *last; 10089 dtrace_difo_t *dp = desc->dtad_difo; 10090 uint32_t size = 0, align = sizeof (uint8_t), mask; 10091 uint16_t format = 0; 10092 dtrace_recdesc_t *rec; 10093 dtrace_state_t *state = ecb->dte_state; 10094 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10095 uint64_t arg = desc->dtad_arg; 10096 10097 ASSERT(MUTEX_HELD(&dtrace_lock)); 10098 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10099 10100 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10101 /* 10102 * If this is an aggregating action, there must be neither 10103 * a speculate nor a commit on the action 
chain. 10104 */ 10105 dtrace_action_t *act; 10106 10107 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10108 if (act->dta_kind == DTRACEACT_COMMIT) 10109 return (EINVAL); 10110 10111 if (act->dta_kind == DTRACEACT_SPECULATE) 10112 return (EINVAL); 10113 } 10114 10115 action = dtrace_ecb_aggregation_create(ecb, desc); 10116 10117 if (action == NULL) 10118 return (EINVAL); 10119 } else { 10120 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10121 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10122 dp != NULL && dp->dtdo_destructive)) { 10123 state->dts_destructive = 1; 10124 } 10125 10126 switch (desc->dtad_kind) { 10127 case DTRACEACT_PRINTF: 10128 case DTRACEACT_PRINTA: 10129 case DTRACEACT_SYSTEM: 10130 case DTRACEACT_FREOPEN: 10131 /* 10132 * We know that our arg is a string -- turn it into a 10133 * format. 10134 */ 10135 if (arg == 0) { 10136 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10137 format = 0; 10138 } else { 10139 ASSERT(arg != 0); 10140 #if defined(sun) 10141 ASSERT(arg > KERNELBASE); 10142 #endif 10143 format = dtrace_format_add(state, 10144 (char *)(uintptr_t)arg); 10145 } 10146 10147 /*FALLTHROUGH*/ 10148 case DTRACEACT_LIBACT: 10149 case DTRACEACT_DIFEXPR: 10150 if (dp == NULL) 10151 return (EINVAL); 10152 10153 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10154 break; 10155 10156 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10157 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10158 return (EINVAL); 10159 10160 size = opt[DTRACEOPT_STRSIZE]; 10161 } 10162 10163 break; 10164 10165 case DTRACEACT_STACK: 10166 if ((nframes = arg) == 0) { 10167 nframes = opt[DTRACEOPT_STACKFRAMES]; 10168 ASSERT(nframes > 0); 10169 arg = nframes; 10170 } 10171 10172 size = nframes * sizeof (pc_t); 10173 break; 10174 10175 case DTRACEACT_JSTACK: 10176 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10177 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10178 10179 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10180 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10181 10182 arg = DTRACE_USTACK_ARG(nframes, strsize); 10183 10184 /*FALLTHROUGH*/ 10185 case DTRACEACT_USTACK: 10186 if (desc->dtad_kind != DTRACEACT_JSTACK && 10187 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10188 strsize = DTRACE_USTACK_STRSIZE(arg); 10189 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10190 ASSERT(nframes > 0); 10191 arg = DTRACE_USTACK_ARG(nframes, strsize); 10192 } 10193 10194 /* 10195 * Save a slot for the pid. 10196 */ 10197 size = (nframes + 1) * sizeof (uint64_t); 10198 size += DTRACE_USTACK_STRSIZE(arg); 10199 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10200 10201 break; 10202 10203 case DTRACEACT_SYM: 10204 case DTRACEACT_MOD: 10205 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10206 sizeof (uint64_t)) || 10207 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10208 return (EINVAL); 10209 break; 10210 10211 case DTRACEACT_USYM: 10212 case DTRACEACT_UMOD: 10213 case DTRACEACT_UADDR: 10214 if (dp == NULL || 10215 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10216 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10217 return (EINVAL); 10218 10219 /* 10220 * We have a slot for the pid, plus a slot for the 10221 * argument. To keep things simple (aligned with 10222 * bitness-neutral sizing), we store each as a 64-bit 10223 * quantity. 
10224 */ 10225 size = 2 * sizeof (uint64_t); 10226 break; 10227 10228 case DTRACEACT_STOP: 10229 case DTRACEACT_BREAKPOINT: 10230 case DTRACEACT_PANIC: 10231 break; 10232 10233 case DTRACEACT_CHILL: 10234 case DTRACEACT_DISCARD: 10235 case DTRACEACT_RAISE: 10236 if (dp == NULL) 10237 return (EINVAL); 10238 break; 10239 10240 case DTRACEACT_EXIT: 10241 if (dp == NULL || 10242 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10243 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10244 return (EINVAL); 10245 break; 10246 10247 case DTRACEACT_SPECULATE: 10248 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10249 return (EINVAL); 10250 10251 if (dp == NULL) 10252 return (EINVAL); 10253 10254 state->dts_speculates = 1; 10255 break; 10256 10257 case DTRACEACT_PRINTM: 10258 size = dp->dtdo_rtype.dtdt_size; 10259 break; 10260 10261 case DTRACEACT_PRINTT: 10262 size = dp->dtdo_rtype.dtdt_size; 10263 break; 10264 10265 case DTRACEACT_COMMIT: { 10266 dtrace_action_t *act = ecb->dte_action; 10267 10268 for (; act != NULL; act = act->dta_next) { 10269 if (act->dta_kind == DTRACEACT_COMMIT) 10270 return (EINVAL); 10271 } 10272 10273 if (dp == NULL) 10274 return (EINVAL); 10275 break; 10276 } 10277 10278 default: 10279 return (EINVAL); 10280 } 10281 10282 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10283 /* 10284 * If this is a data-storing action or a speculate, 10285 * we must be sure that there isn't a commit on the 10286 * action chain. 10287 */ 10288 dtrace_action_t *act = ecb->dte_action; 10289 10290 for (; act != NULL; act = act->dta_next) { 10291 if (act->dta_kind == DTRACEACT_COMMIT) 10292 return (EINVAL); 10293 } 10294 } 10295 10296 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10297 action->dta_rec.dtrd_size = size; 10298 } 10299 10300 action->dta_refcnt = 1; 10301 rec = &action->dta_rec; 10302 size = rec->dtrd_size; 10303 10304 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10305 if (!(size & mask)) { 10306 align = mask + 1; 10307 break; 10308 } 10309 } 10310 10311 action->dta_kind = desc->dtad_kind; 10312 10313 if ((action->dta_difo = dp) != NULL) 10314 dtrace_difo_hold(dp); 10315 10316 rec->dtrd_action = action->dta_kind; 10317 rec->dtrd_arg = arg; 10318 rec->dtrd_uarg = desc->dtad_uarg; 10319 rec->dtrd_alignment = (uint16_t)align; 10320 rec->dtrd_format = format; 10321 10322 if ((last = ecb->dte_action_last) != NULL) { 10323 ASSERT(ecb->dte_action != NULL); 10324 action->dta_prev = last; 10325 last->dta_next = action; 10326 } else { 10327 ASSERT(ecb->dte_action == NULL); 10328 ecb->dte_action = action; 10329 } 10330 10331 ecb->dte_action_last = action; 10332 10333 return (0); 10334 } 10335 10336 static void 10337 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10338 { 10339 dtrace_action_t *act = ecb->dte_action, *next; 10340 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10341 dtrace_difo_t *dp; 10342 uint16_t format; 10343 10344 if (act != NULL && act->dta_refcnt > 1) { 10345 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10346 act->dta_refcnt--; 10347 } else { 10348 for (; act != NULL; act = next) { 10349 next = act->dta_next; 10350 ASSERT(next != NULL || act == ecb->dte_action_last); 10351 ASSERT(act->dta_refcnt == 1); 10352 10353 if ((format = act->dta_rec.dtrd_format) != 0) 10354 dtrace_format_remove(ecb->dte_state, format); 10355 10356 if ((dp = act->dta_difo) != NULL) 10357 dtrace_difo_release(dp, vstate); 10358 10359 if (DTRACEACT_ISAGG(act->dta_kind)) { 10360 dtrace_ecb_aggregation_destroy(ecb, act); 10361 } else 
{ 10362 kmem_free(act, sizeof (dtrace_action_t)); 10363 } 10364 } 10365 } 10366 10367 ecb->dte_action = NULL; 10368 ecb->dte_action_last = NULL; 10369 ecb->dte_size = sizeof (dtrace_epid_t); 10370 } 10371 10372 static void 10373 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10374 { 10375 /* 10376 * We disable the ECB by removing it from its probe. 10377 */ 10378 dtrace_ecb_t *pecb, *prev = NULL; 10379 dtrace_probe_t *probe = ecb->dte_probe; 10380 10381 ASSERT(MUTEX_HELD(&dtrace_lock)); 10382 10383 if (probe == NULL) { 10384 /* 10385 * This is the NULL probe; there is nothing to disable. 10386 */ 10387 return; 10388 } 10389 10390 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10391 if (pecb == ecb) 10392 break; 10393 prev = pecb; 10394 } 10395 10396 ASSERT(pecb != NULL); 10397 10398 if (prev == NULL) { 10399 probe->dtpr_ecb = ecb->dte_next; 10400 } else { 10401 prev->dte_next = ecb->dte_next; 10402 } 10403 10404 if (ecb == probe->dtpr_ecb_last) { 10405 ASSERT(ecb->dte_next == NULL); 10406 probe->dtpr_ecb_last = prev; 10407 } 10408 10409 /* 10410 * The ECB has been disconnected from the probe; now sync to assure 10411 * that all CPUs have seen the change before returning. 10412 */ 10413 dtrace_sync(); 10414 10415 if (probe->dtpr_ecb == NULL) { 10416 /* 10417 * That was the last ECB on the probe; clear the predicate 10418 * cache ID for the probe, disable it and sync one more time 10419 * to assure that we'll never hit it again. 10420 */ 10421 dtrace_provider_t *prov = probe->dtpr_provider; 10422 10423 ASSERT(ecb->dte_next == NULL); 10424 ASSERT(probe->dtpr_ecb_last == NULL); 10425 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10426 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10427 probe->dtpr_id, probe->dtpr_arg); 10428 dtrace_sync(); 10429 } else { 10430 /* 10431 * There is at least one ECB remaining on the probe. If there 10432 * is _exactly_ one, set the probe's predicate cache ID to be 10433 * the predicate cache ID of the remaining ECB. 
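 *
 * This matters because dtrace_probe() consults dtpr_predcache before it
 * does any per-ECB work.  As a rough, hedged sketch (assuming the
 * Solaris thread-local field name t_predcache):
 *
 *	if (probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
 *	    probe->dtpr_predcache == curthread->t_predcache)
 *		return;		(cached predicate is known to be false)
 *
 * Restoring dtpr_predcache here therefore restores that fast path once
 * only a single, cacheable predicate remains on the probe.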
10434 */ 10435 ASSERT(probe->dtpr_ecb_last != NULL); 10436 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10437 10438 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10439 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10440 10441 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10442 10443 if (p != NULL) 10444 probe->dtpr_predcache = p->dtp_cacheid; 10445 } 10446 10447 ecb->dte_next = NULL; 10448 } 10449 } 10450 10451 static void 10452 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10453 { 10454 dtrace_state_t *state = ecb->dte_state; 10455 dtrace_vstate_t *vstate = &state->dts_vstate; 10456 dtrace_predicate_t *pred; 10457 dtrace_epid_t epid = ecb->dte_epid; 10458 10459 ASSERT(MUTEX_HELD(&dtrace_lock)); 10460 ASSERT(ecb->dte_next == NULL); 10461 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10462 10463 if ((pred = ecb->dte_predicate) != NULL) 10464 dtrace_predicate_release(pred, vstate); 10465 10466 dtrace_ecb_action_remove(ecb); 10467 10468 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10469 state->dts_ecbs[epid - 1] = NULL; 10470 10471 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10472 } 10473 10474 static dtrace_ecb_t * 10475 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10476 dtrace_enabling_t *enab) 10477 { 10478 dtrace_ecb_t *ecb; 10479 dtrace_predicate_t *pred; 10480 dtrace_actdesc_t *act; 10481 dtrace_provider_t *prov; 10482 dtrace_ecbdesc_t *desc = enab->dten_current; 10483 10484 ASSERT(MUTEX_HELD(&dtrace_lock)); 10485 ASSERT(state != NULL); 10486 10487 ecb = dtrace_ecb_add(state, probe); 10488 ecb->dte_uarg = desc->dted_uarg; 10489 10490 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10491 dtrace_predicate_hold(pred); 10492 ecb->dte_predicate = pred; 10493 } 10494 10495 if (probe != NULL) { 10496 /* 10497 * If the provider shows more leg than the consumer is old 10498 * enough to see, we need to enable the appropriate implicit 10499 * predicate bits to prevent the ecb from activating at 10500 * revealing times. 10501 * 10502 * Providers specifying DTRACE_PRIV_USER at register time 10503 * are stating that they need the /proc-style privilege 10504 * model to be enforced, and this is what DTRACE_COND_OWNER 10505 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10506 */ 10507 prov = probe->dtpr_provider; 10508 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10509 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10510 ecb->dte_cond |= DTRACE_COND_OWNER; 10511 10512 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10513 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10514 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10515 10516 /* 10517 * If the provider shows us kernel innards and the user 10518 * is lacking sufficient privilege, enable the 10519 * DTRACE_COND_USERMODE implicit predicate. 10520 */ 10521 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10522 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10523 ecb->dte_cond |= DTRACE_COND_USERMODE; 10524 } 10525 10526 if (dtrace_ecb_create_cache != NULL) { 10527 /* 10528 * If we have a cached ecb, we'll use its action list instead 10529 * of creating our own (saving both time and space). 
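 *
 * The sharing is by reference: the leading action's dta_refcnt is
 * bumped below and this ECB simply points at the cached chain, which
 * is why dtrace_ecb_action_remove() above only decrements the count
 * (rather than freeing the actions) when it finds a reference count
 * greater than one.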
10530 */ 10531 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10532 dtrace_action_t *act = cached->dte_action; 10533 10534 if (act != NULL) { 10535 ASSERT(act->dta_refcnt > 0); 10536 act->dta_refcnt++; 10537 ecb->dte_action = act; 10538 ecb->dte_action_last = cached->dte_action_last; 10539 ecb->dte_needed = cached->dte_needed; 10540 ecb->dte_size = cached->dte_size; 10541 ecb->dte_alignment = cached->dte_alignment; 10542 } 10543 10544 return (ecb); 10545 } 10546 10547 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10548 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10549 dtrace_ecb_destroy(ecb); 10550 return (NULL); 10551 } 10552 } 10553 10554 dtrace_ecb_resize(ecb); 10555 10556 return (dtrace_ecb_create_cache = ecb); 10557 } 10558 10559 static int 10560 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10561 { 10562 dtrace_ecb_t *ecb; 10563 dtrace_enabling_t *enab = arg; 10564 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10565 10566 ASSERT(state != NULL); 10567 10568 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10569 /* 10570 * This probe was created in a generation for which this 10571 * enabling has previously created ECBs; we don't want to 10572 * enable it again, so just kick out. 10573 */ 10574 return (DTRACE_MATCH_NEXT); 10575 } 10576 10577 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10578 return (DTRACE_MATCH_DONE); 10579 10580 dtrace_ecb_enable(ecb); 10581 return (DTRACE_MATCH_NEXT); 10582 } 10583 10584 static dtrace_ecb_t * 10585 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10586 { 10587 dtrace_ecb_t *ecb; 10588 10589 ASSERT(MUTEX_HELD(&dtrace_lock)); 10590 10591 if (id == 0 || id > state->dts_necbs) 10592 return (NULL); 10593 10594 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10595 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10596 10597 return (state->dts_ecbs[id - 1]); 10598 } 10599 10600 static dtrace_aggregation_t * 10601 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10602 { 10603 dtrace_aggregation_t *agg; 10604 10605 ASSERT(MUTEX_HELD(&dtrace_lock)); 10606 10607 if (id == 0 || id > state->dts_naggregations) 10608 return (NULL); 10609 10610 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10611 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10612 agg->dtag_id == id); 10613 10614 return (state->dts_aggregations[id - 1]); 10615 } 10616 10617 /* 10618 * DTrace Buffer Functions 10619 * 10620 * The following functions manipulate DTrace buffers. Most of these functions 10621 * are called in the context of establishing or processing consumer state; 10622 * exceptions are explicitly noted. 10623 */ 10624 10625 /* 10626 * Note: called from cross call context. This function switches the two 10627 * buffers on a given CPU. The atomicity of this operation is assured by 10628 * disabling interrupts while the actual switch takes place; the disabling of 10629 * interrupts serializes the execution with any execution of dtrace_probe() on 10630 * the same CPU. 
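 *
 * As an illustrative sketch of the caller side (hedged; the exact call
 * site varies), a consumer-driven snapshot typically drives this
 * routine via a cross call to the CPU that owns the buffer:
 *
 *	buf = &state->dts_buffer[desc.dtbd_cpu];
 *	dtrace_xcall(desc.dtbd_cpu,
 *	    (dtrace_xcall_t)dtrace_buffer_switch, buf);
 *
 * so the switch itself always runs on the owning CPU with interrupts
 * disabled, never concurrently with dtrace_probe() on that CPU.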
10631 */ 10632 static void 10633 dtrace_buffer_switch(dtrace_buffer_t *buf) 10634 { 10635 caddr_t tomax = buf->dtb_tomax; 10636 caddr_t xamot = buf->dtb_xamot; 10637 dtrace_icookie_t cookie; 10638 10639 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10640 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10641 10642 cookie = dtrace_interrupt_disable(); 10643 buf->dtb_tomax = xamot; 10644 buf->dtb_xamot = tomax; 10645 buf->dtb_xamot_drops = buf->dtb_drops; 10646 buf->dtb_xamot_offset = buf->dtb_offset; 10647 buf->dtb_xamot_errors = buf->dtb_errors; 10648 buf->dtb_xamot_flags = buf->dtb_flags; 10649 buf->dtb_offset = 0; 10650 buf->dtb_drops = 0; 10651 buf->dtb_errors = 0; 10652 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10653 dtrace_interrupt_enable(cookie); 10654 } 10655 10656 /* 10657 * Note: called from cross call context. This function activates a buffer 10658 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10659 * is guaranteed by the disabling of interrupts. 10660 */ 10661 static void 10662 dtrace_buffer_activate(dtrace_state_t *state) 10663 { 10664 dtrace_buffer_t *buf; 10665 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10666 10667 buf = &state->dts_buffer[curcpu]; 10668 10669 if (buf->dtb_tomax != NULL) { 10670 /* 10671 * We might like to assert that the buffer is marked inactive, 10672 * but this isn't necessarily true: the buffer for the CPU 10673 * that processes the BEGIN probe has its buffer activated 10674 * manually. In this case, we take the (harmless) action 10675 * re-clearing the bit INACTIVE bit. 10676 */ 10677 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10678 } 10679 10680 dtrace_interrupt_enable(cookie); 10681 } 10682 10683 static int 10684 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10685 processorid_t cpu) 10686 { 10687 #if defined(sun) 10688 cpu_t *cp; 10689 #endif 10690 dtrace_buffer_t *buf; 10691 10692 #if defined(sun) 10693 ASSERT(MUTEX_HELD(&cpu_lock)); 10694 ASSERT(MUTEX_HELD(&dtrace_lock)); 10695 10696 if (size > dtrace_nonroot_maxsize && 10697 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10698 return (EFBIG); 10699 10700 cp = cpu_list; 10701 10702 do { 10703 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10704 continue; 10705 10706 buf = &bufs[cp->cpu_id]; 10707 10708 /* 10709 * If there is already a buffer allocated for this CPU, it 10710 * is only possible that this is a DR event. 
In this case, 10711 */ 10712 if (buf->dtb_tomax != NULL) { 10713 ASSERT(buf->dtb_size == size); 10714 continue; 10715 } 10716 10717 ASSERT(buf->dtb_xamot == NULL); 10718 10719 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10720 goto err; 10721 10722 buf->dtb_size = size; 10723 buf->dtb_flags = flags; 10724 buf->dtb_offset = 0; 10725 buf->dtb_drops = 0; 10726 10727 if (flags & DTRACEBUF_NOSWITCH) 10728 continue; 10729 10730 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10731 goto err; 10732 } while ((cp = cp->cpu_next) != cpu_list); 10733 10734 return (0); 10735 10736 err: 10737 cp = cpu_list; 10738 10739 do { 10740 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10741 continue; 10742 10743 buf = &bufs[cp->cpu_id]; 10744 10745 if (buf->dtb_xamot != NULL) { 10746 ASSERT(buf->dtb_tomax != NULL); 10747 ASSERT(buf->dtb_size == size); 10748 kmem_free(buf->dtb_xamot, size); 10749 } 10750 10751 if (buf->dtb_tomax != NULL) { 10752 ASSERT(buf->dtb_size == size); 10753 kmem_free(buf->dtb_tomax, size); 10754 } 10755 10756 buf->dtb_tomax = NULL; 10757 buf->dtb_xamot = NULL; 10758 buf->dtb_size = 0; 10759 } while ((cp = cp->cpu_next) != cpu_list); 10760 10761 return (ENOMEM); 10762 #else 10763 int i; 10764 10765 #if defined(__amd64__) || defined(__mips__) 10766 /* 10767 * FreeBSD isn't good at limiting the amount of memory we 10768 * ask to malloc, so let's place a limit here before trying 10769 * to do something that might well end in tears at bedtime. 10770 */ 10771 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10772 return(ENOMEM); 10773 #endif 10774 10775 ASSERT(MUTEX_HELD(&dtrace_lock)); 10776 CPU_FOREACH(i) { 10777 if (cpu != DTRACE_CPUALL && cpu != i) 10778 continue; 10779 10780 buf = &bufs[i]; 10781 10782 /* 10783 * If there is already a buffer allocated for this CPU, it 10784 * is only possible that this is a DR event. In this case, 10785 * the buffer size must match our specified size. 10786 */ 10787 if (buf->dtb_tomax != NULL) { 10788 ASSERT(buf->dtb_size == size); 10789 continue; 10790 } 10791 10792 ASSERT(buf->dtb_xamot == NULL); 10793 10794 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10795 goto err; 10796 10797 buf->dtb_size = size; 10798 buf->dtb_flags = flags; 10799 buf->dtb_offset = 0; 10800 buf->dtb_drops = 0; 10801 10802 if (flags & DTRACEBUF_NOSWITCH) 10803 continue; 10804 10805 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10806 goto err; 10807 } 10808 10809 return (0); 10810 10811 err: 10812 /* 10813 * Error allocating memory, so free the buffers that were 10814 * allocated before the failed allocation. 10815 */ 10816 CPU_FOREACH(i) { 10817 if (cpu != DTRACE_CPUALL && cpu != i) 10818 continue; 10819 10820 buf = &bufs[i]; 10821 10822 if (buf->dtb_xamot != NULL) { 10823 ASSERT(buf->dtb_tomax != NULL); 10824 ASSERT(buf->dtb_size == size); 10825 kmem_free(buf->dtb_xamot, size); 10826 } 10827 10828 if (buf->dtb_tomax != NULL) { 10829 ASSERT(buf->dtb_size == size); 10830 kmem_free(buf->dtb_tomax, size); 10831 } 10832 10833 buf->dtb_tomax = NULL; 10834 buf->dtb_xamot = NULL; 10835 buf->dtb_size = 0; 10836 10837 } 10838 10839 return (ENOMEM); 10840 #endif 10841 } 10842 10843 /* 10844 * Note: called from probe context. This function just increments the drop 10845 * count on a buffer. It has been made a function to allow for the 10846 * possibility of understanding the source of mysterious drop counts. 
(A 10847 * problem for which one may be particularly disappointed that DTrace cannot 10848 * be used to understand DTrace.) 10849 */ 10850 static void 10851 dtrace_buffer_drop(dtrace_buffer_t *buf) 10852 { 10853 buf->dtb_drops++; 10854 } 10855 10856 /* 10857 * Note: called from probe context. This function is called to reserve space 10858 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10859 * mstate. Returns the new offset in the buffer, or a negative value if an 10860 * error has occurred. 10861 */ 10862 static intptr_t 10863 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10864 dtrace_state_t *state, dtrace_mstate_t *mstate) 10865 { 10866 intptr_t offs = buf->dtb_offset, soffs; 10867 intptr_t woffs; 10868 caddr_t tomax; 10869 size_t total; 10870 10871 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10872 return (-1); 10873 10874 if ((tomax = buf->dtb_tomax) == NULL) { 10875 dtrace_buffer_drop(buf); 10876 return (-1); 10877 } 10878 10879 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10880 while (offs & (align - 1)) { 10881 /* 10882 * Assert that our alignment is off by a number which 10883 * is itself sizeof (uint32_t) aligned. 10884 */ 10885 ASSERT(!((align - (offs & (align - 1))) & 10886 (sizeof (uint32_t) - 1))); 10887 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10888 offs += sizeof (uint32_t); 10889 } 10890 10891 if ((soffs = offs + needed) > buf->dtb_size) { 10892 dtrace_buffer_drop(buf); 10893 return (-1); 10894 } 10895 10896 if (mstate == NULL) 10897 return (offs); 10898 10899 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10900 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10901 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10902 10903 return (offs); 10904 } 10905 10906 if (buf->dtb_flags & DTRACEBUF_FILL) { 10907 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10908 (buf->dtb_flags & DTRACEBUF_FULL)) 10909 return (-1); 10910 goto out; 10911 } 10912 10913 total = needed + (offs & (align - 1)); 10914 10915 /* 10916 * For a ring buffer, life is quite a bit more complicated. Before 10917 * we can store any padding, we need to adjust our wrapping offset. 10918 * (If we've never before wrapped or we're not about to, no adjustment 10919 * is required.) 10920 */ 10921 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10922 offs + total > buf->dtb_size) { 10923 woffs = buf->dtb_xamot_offset; 10924 10925 if (offs + total > buf->dtb_size) { 10926 /* 10927 * We can't fit in the end of the buffer. First, a 10928 * sanity check that we can fit in the buffer at all. 10929 */ 10930 if (total > buf->dtb_size) { 10931 dtrace_buffer_drop(buf); 10932 return (-1); 10933 } 10934 10935 /* 10936 * We're going to be storing at the top of the buffer, 10937 * so now we need to deal with the wrapped offset. We 10938 * only reset our wrapped offset to 0 if it is 10939 * currently greater than the current offset. If it 10940 * is less than the current offset, it is because a 10941 * previous allocation induced a wrap -- but the 10942 * allocation didn't subsequently take the space due 10943 * to an error or false predicate evaluation. In this 10944 * case, we'll just leave the wrapped offset alone: if 10945 * the wrapped offset hasn't been advanced far enough 10946 * for this allocation, it will be adjusted in the 10947 * lower loop. 
10948 */ 10949 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10950 if (woffs >= offs) 10951 woffs = 0; 10952 } else { 10953 woffs = 0; 10954 } 10955 10956 /* 10957 * Now we know that we're going to be storing to the 10958 * top of the buffer and that there is room for us 10959 * there. We need to clear the buffer from the current 10960 * offset to the end (there may be old gunk there). 10961 */ 10962 while (offs < buf->dtb_size) 10963 tomax[offs++] = 0; 10964 10965 /* 10966 * We need to set our offset to zero. And because we 10967 * are wrapping, we need to set the bit indicating as 10968 * much. We can also adjust our needed space back 10969 * down to the space required by the ECB -- we know 10970 * that the top of the buffer is aligned. 10971 */ 10972 offs = 0; 10973 total = needed; 10974 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10975 } else { 10976 /* 10977 * There is room for us in the buffer, so we simply 10978 * need to check the wrapped offset. 10979 */ 10980 if (woffs < offs) { 10981 /* 10982 * The wrapped offset is less than the offset. 10983 * This can happen if we allocated buffer space 10984 * that induced a wrap, but then we didn't 10985 * subsequently take the space due to an error 10986 * or false predicate evaluation. This is 10987 * okay; we know that _this_ allocation isn't 10988 * going to induce a wrap. We still can't 10989 * reset the wrapped offset to be zero, 10990 * however: the space may have been trashed in 10991 * the previous failed probe attempt. But at 10992 * least the wrapped offset doesn't need to 10993 * be adjusted at all... 10994 */ 10995 goto out; 10996 } 10997 } 10998 10999 while (offs + total > woffs) { 11000 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11001 size_t size; 11002 11003 if (epid == DTRACE_EPIDNONE) { 11004 size = sizeof (uint32_t); 11005 } else { 11006 ASSERT(epid <= state->dts_necbs); 11007 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11008 11009 size = state->dts_ecbs[epid - 1]->dte_size; 11010 } 11011 11012 ASSERT(woffs + size <= buf->dtb_size); 11013 ASSERT(size != 0); 11014 11015 if (woffs + size == buf->dtb_size) { 11016 /* 11017 * We've reached the end of the buffer; we want 11018 * to set the wrapped offset to 0 and break 11019 * out. However, if the offs is 0, then we're 11020 * in a strange edge-condition: the amount of 11021 * space that we want to reserve plus the size 11022 * of the record that we're overwriting is 11023 * greater than the size of the buffer. This 11024 * is problematic because if we reserve the 11025 * space but subsequently don't consume it (due 11026 * to a failed predicate or error) the wrapped 11027 * offset will be 0 -- yet the EPID at offset 0 11028 * will not be committed. This situation is 11029 * relatively easy to deal with: if we're in 11030 * this case, the buffer is indistinguishable 11031 * from one that hasn't wrapped; we need only 11032 * finish the job by clearing the wrapped bit, 11033 * explicitly setting the offset to be 0, and 11034 * zero'ing out the old data in the buffer. 11035 */ 11036 if (offs == 0) { 11037 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11038 buf->dtb_offset = 0; 11039 woffs = total; 11040 11041 while (woffs < buf->dtb_size) 11042 tomax[woffs++] = 0; 11043 } 11044 11045 woffs = 0; 11046 break; 11047 } 11048 11049 woffs += size; 11050 } 11051 11052 /* 11053 * We have a wrapped offset. It may be that the wrapped offset 11054 * has become zero -- that's okay. 
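 *
 * (Recall that ring buffers are never switched, so dtb_xamot_offset is
 * not needed for its usual purpose and is repurposed here and in
 * dtrace_buffer_polish() to record the wrapped offset.)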
11055 */ 11056 buf->dtb_xamot_offset = woffs; 11057 } 11058 11059 out: 11060 /* 11061 * Now we can plow the buffer with any necessary padding. 11062 */ 11063 while (offs & (align - 1)) { 11064 /* 11065 * Assert that our alignment is off by a number which 11066 * is itself sizeof (uint32_t) aligned. 11067 */ 11068 ASSERT(!((align - (offs & (align - 1))) & 11069 (sizeof (uint32_t) - 1))); 11070 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11071 offs += sizeof (uint32_t); 11072 } 11073 11074 if (buf->dtb_flags & DTRACEBUF_FILL) { 11075 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11076 buf->dtb_flags |= DTRACEBUF_FULL; 11077 return (-1); 11078 } 11079 } 11080 11081 if (mstate == NULL) 11082 return (offs); 11083 11084 /* 11085 * For ring buffers and fill buffers, the scratch space is always 11086 * the inactive buffer. 11087 */ 11088 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11089 mstate->dtms_scratch_size = buf->dtb_size; 11090 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11091 11092 return (offs); 11093 } 11094 11095 static void 11096 dtrace_buffer_polish(dtrace_buffer_t *buf) 11097 { 11098 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11099 ASSERT(MUTEX_HELD(&dtrace_lock)); 11100 11101 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11102 return; 11103 11104 /* 11105 * We need to polish the ring buffer. There are three cases: 11106 * 11107 * - The first (and presumably most common) is that there is no gap 11108 * between the buffer offset and the wrapped offset. In this case, 11109 * there is nothing in the buffer that isn't valid data; we can 11110 * mark the buffer as polished and return. 11111 * 11112 * - The second (less common than the first but still more common 11113 * than the third) is that there is a gap between the buffer offset 11114 * and the wrapped offset, and the wrapped offset is larger than the 11115 * buffer offset. This can happen because of an alignment issue, or 11116 * can happen because of a call to dtrace_buffer_reserve() that 11117 * didn't subsequently consume the buffer space. In this case, 11118 * we need to zero the data from the buffer offset to the wrapped 11119 * offset. 11120 * 11121 * - The third (and least common) is that there is a gap between the 11122 * buffer offset and the wrapped offset, but the wrapped offset is 11123 * _less_ than the buffer offset. This can only happen because a 11124 * call to dtrace_buffer_reserve() induced a wrap, but the space 11125 * was not subsequently consumed. In this case, we need to zero the 11126 * space from the offset to the end of the buffer _and_ from the 11127 * top of the buffer to the wrapped offset. 
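 *
 * Concretely (sizes illustrative only): with dtb_size of 64, a buffer
 * offset of 40 and a wrapped offset of 48, the second case zeroes
 * bytes 40 through 47; with a buffer offset of 48 and a wrapped
 * offset of 8, the third case zeroes bytes 48 through 63 and bytes 0
 * through 7.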
11128 */ 11129 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11130 bzero(buf->dtb_tomax + buf->dtb_offset, 11131 buf->dtb_xamot_offset - buf->dtb_offset); 11132 } 11133 11134 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11135 bzero(buf->dtb_tomax + buf->dtb_offset, 11136 buf->dtb_size - buf->dtb_offset); 11137 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11138 } 11139 } 11140 11141 static void 11142 dtrace_buffer_free(dtrace_buffer_t *bufs) 11143 { 11144 int i; 11145 11146 for (i = 0; i < NCPU; i++) { 11147 dtrace_buffer_t *buf = &bufs[i]; 11148 11149 if (buf->dtb_tomax == NULL) { 11150 ASSERT(buf->dtb_xamot == NULL); 11151 ASSERT(buf->dtb_size == 0); 11152 continue; 11153 } 11154 11155 if (buf->dtb_xamot != NULL) { 11156 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11157 kmem_free(buf->dtb_xamot, buf->dtb_size); 11158 } 11159 11160 kmem_free(buf->dtb_tomax, buf->dtb_size); 11161 buf->dtb_size = 0; 11162 buf->dtb_tomax = NULL; 11163 buf->dtb_xamot = NULL; 11164 } 11165 } 11166 11167 /* 11168 * DTrace Enabling Functions 11169 */ 11170 static dtrace_enabling_t * 11171 dtrace_enabling_create(dtrace_vstate_t *vstate) 11172 { 11173 dtrace_enabling_t *enab; 11174 11175 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11176 enab->dten_vstate = vstate; 11177 11178 return (enab); 11179 } 11180 11181 static void 11182 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11183 { 11184 dtrace_ecbdesc_t **ndesc; 11185 size_t osize, nsize; 11186 11187 /* 11188 * We can't add to enablings after we've enabled them, or after we've 11189 * retained them. 11190 */ 11191 ASSERT(enab->dten_probegen == 0); 11192 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11193 11194 if (enab->dten_ndesc < enab->dten_maxdesc) { 11195 enab->dten_desc[enab->dten_ndesc++] = ecb; 11196 return; 11197 } 11198 11199 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11200 11201 if (enab->dten_maxdesc == 0) { 11202 enab->dten_maxdesc = 1; 11203 } else { 11204 enab->dten_maxdesc <<= 1; 11205 } 11206 11207 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11208 11209 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11210 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11211 bcopy(enab->dten_desc, ndesc, osize); 11212 if (enab->dten_desc != NULL) 11213 kmem_free(enab->dten_desc, osize); 11214 11215 enab->dten_desc = ndesc; 11216 enab->dten_desc[enab->dten_ndesc++] = ecb; 11217 } 11218 11219 static void 11220 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11221 dtrace_probedesc_t *pd) 11222 { 11223 dtrace_ecbdesc_t *new; 11224 dtrace_predicate_t *pred; 11225 dtrace_actdesc_t *act; 11226 11227 /* 11228 * We're going to create a new ECB description that matches the 11229 * specified ECB in every way, but has the specified probe description. 
11230 */ 11231 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11232 11233 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11234 dtrace_predicate_hold(pred); 11235 11236 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11237 dtrace_actdesc_hold(act); 11238 11239 new->dted_action = ecb->dted_action; 11240 new->dted_pred = ecb->dted_pred; 11241 new->dted_probe = *pd; 11242 new->dted_uarg = ecb->dted_uarg; 11243 11244 dtrace_enabling_add(enab, new); 11245 } 11246 11247 static void 11248 dtrace_enabling_dump(dtrace_enabling_t *enab) 11249 { 11250 int i; 11251 11252 for (i = 0; i < enab->dten_ndesc; i++) { 11253 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11254 11255 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11256 desc->dtpd_provider, desc->dtpd_mod, 11257 desc->dtpd_func, desc->dtpd_name); 11258 } 11259 } 11260 11261 static void 11262 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11263 { 11264 int i; 11265 dtrace_ecbdesc_t *ep; 11266 dtrace_vstate_t *vstate = enab->dten_vstate; 11267 11268 ASSERT(MUTEX_HELD(&dtrace_lock)); 11269 11270 for (i = 0; i < enab->dten_ndesc; i++) { 11271 dtrace_actdesc_t *act, *next; 11272 dtrace_predicate_t *pred; 11273 11274 ep = enab->dten_desc[i]; 11275 11276 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11277 dtrace_predicate_release(pred, vstate); 11278 11279 for (act = ep->dted_action; act != NULL; act = next) { 11280 next = act->dtad_next; 11281 dtrace_actdesc_release(act, vstate); 11282 } 11283 11284 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11285 } 11286 11287 if (enab->dten_desc != NULL) 11288 kmem_free(enab->dten_desc, 11289 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11290 11291 /* 11292 * If this was a retained enabling, decrement the dts_nretained count 11293 * and take it off of the dtrace_retained list. 11294 */ 11295 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11296 dtrace_retained == enab) { 11297 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11298 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11299 enab->dten_vstate->dtvs_state->dts_nretained--; 11300 } 11301 11302 if (enab->dten_prev == NULL) { 11303 if (dtrace_retained == enab) { 11304 dtrace_retained = enab->dten_next; 11305 11306 if (dtrace_retained != NULL) 11307 dtrace_retained->dten_prev = NULL; 11308 } 11309 } else { 11310 ASSERT(enab != dtrace_retained); 11311 ASSERT(dtrace_retained != NULL); 11312 enab->dten_prev->dten_next = enab->dten_next; 11313 } 11314 11315 if (enab->dten_next != NULL) { 11316 ASSERT(dtrace_retained != NULL); 11317 enab->dten_next->dten_prev = enab->dten_prev; 11318 } 11319 11320 kmem_free(enab, sizeof (dtrace_enabling_t)); 11321 } 11322 11323 static int 11324 dtrace_enabling_retain(dtrace_enabling_t *enab) 11325 { 11326 dtrace_state_t *state; 11327 11328 ASSERT(MUTEX_HELD(&dtrace_lock)); 11329 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11330 ASSERT(enab->dten_vstate != NULL); 11331 11332 state = enab->dten_vstate->dtvs_state; 11333 ASSERT(state != NULL); 11334 11335 /* 11336 * We only allow each state to retain dtrace_retain_max enablings. 
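 *
 * (Retained enablings persist until the consumer state is destroyed
 * and are re-matched whenever new probes become available, so an
 * unbounded number of them would let a consumer pin an unbounded
 * amount of kernel memory; dtrace_retain_max is the tunable that
 * bounds this per state.)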
11337 */ 11338 if (state->dts_nretained >= dtrace_retain_max) 11339 return (ENOSPC); 11340 11341 state->dts_nretained++; 11342 11343 if (dtrace_retained == NULL) { 11344 dtrace_retained = enab; 11345 return (0); 11346 } 11347 11348 enab->dten_next = dtrace_retained; 11349 dtrace_retained->dten_prev = enab; 11350 dtrace_retained = enab; 11351 11352 return (0); 11353 } 11354 11355 static int 11356 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11357 dtrace_probedesc_t *create) 11358 { 11359 dtrace_enabling_t *new, *enab; 11360 int found = 0, err = ENOENT; 11361 11362 ASSERT(MUTEX_HELD(&dtrace_lock)); 11363 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11364 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11365 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11366 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11367 11368 new = dtrace_enabling_create(&state->dts_vstate); 11369 11370 /* 11371 * Iterate over all retained enablings, looking for enablings that 11372 * match the specified state. 11373 */ 11374 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11375 int i; 11376 11377 /* 11378 * dtvs_state can only be NULL for helper enablings -- and 11379 * helper enablings can't be retained. 11380 */ 11381 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11382 11383 if (enab->dten_vstate->dtvs_state != state) 11384 continue; 11385 11386 /* 11387 * Now iterate over each probe description; we're looking for 11388 * an exact match to the specified probe description. 11389 */ 11390 for (i = 0; i < enab->dten_ndesc; i++) { 11391 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11392 dtrace_probedesc_t *pd = &ep->dted_probe; 11393 11394 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11395 continue; 11396 11397 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11398 continue; 11399 11400 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11401 continue; 11402 11403 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11404 continue; 11405 11406 /* 11407 * We have a winning probe! Add it to our growing 11408 * enabling. 11409 */ 11410 found = 1; 11411 dtrace_enabling_addlike(new, ep, create); 11412 } 11413 } 11414 11415 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11416 dtrace_enabling_destroy(new); 11417 return (err); 11418 } 11419 11420 return (0); 11421 } 11422 11423 static void 11424 dtrace_enabling_retract(dtrace_state_t *state) 11425 { 11426 dtrace_enabling_t *enab, *next; 11427 11428 ASSERT(MUTEX_HELD(&dtrace_lock)); 11429 11430 /* 11431 * Iterate over all retained enablings, destroy the enablings retained 11432 * for the specified state. 11433 */ 11434 for (enab = dtrace_retained; enab != NULL; enab = next) { 11435 next = enab->dten_next; 11436 11437 /* 11438 * dtvs_state can only be NULL for helper enablings -- and 11439 * helper enablings can't be retained. 
11440 */ 11441 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11442 11443 if (enab->dten_vstate->dtvs_state == state) { 11444 ASSERT(state->dts_nretained > 0); 11445 dtrace_enabling_destroy(enab); 11446 } 11447 } 11448 11449 ASSERT(state->dts_nretained == 0); 11450 } 11451 11452 static int 11453 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11454 { 11455 int i = 0; 11456 int matched = 0; 11457 11458 ASSERT(MUTEX_HELD(&cpu_lock)); 11459 ASSERT(MUTEX_HELD(&dtrace_lock)); 11460 11461 for (i = 0; i < enab->dten_ndesc; i++) { 11462 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11463 11464 enab->dten_current = ep; 11465 enab->dten_error = 0; 11466 11467 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11468 11469 if (enab->dten_error != 0) { 11470 /* 11471 * If we get an error half-way through enabling the 11472 * probes, we kick out -- perhaps with some number of 11473 * them enabled. Leaving enabled probes enabled may 11474 * be slightly confusing for user-level, but we expect 11475 * that no one will attempt to actually drive on in 11476 * the face of such errors. If this is an anonymous 11477 * enabling (indicated with a NULL nmatched pointer), 11478 * we cmn_err() a message. We aren't expecting to 11479 * get such an error -- such as it can exist at all, 11480 * it would be a result of corrupted DOF in the driver 11481 * properties. 11482 */ 11483 if (nmatched == NULL) { 11484 cmn_err(CE_WARN, "dtrace_enabling_match() " 11485 "error on %p: %d", (void *)ep, 11486 enab->dten_error); 11487 } 11488 11489 return (enab->dten_error); 11490 } 11491 } 11492 11493 enab->dten_probegen = dtrace_probegen; 11494 if (nmatched != NULL) 11495 *nmatched = matched; 11496 11497 return (0); 11498 } 11499 11500 static void 11501 dtrace_enabling_matchall(void) 11502 { 11503 dtrace_enabling_t *enab; 11504 11505 mutex_enter(&cpu_lock); 11506 mutex_enter(&dtrace_lock); 11507 11508 /* 11509 * Iterate over all retained enablings to see if any probes match 11510 * against them. We only perform this operation on enablings for which 11511 * we have sufficient permissions by virtue of being in the global zone 11512 * or in the same zone as the DTrace client. Because we can be called 11513 * after dtrace_detach() has been called, we cannot assert that there 11514 * are retained enablings. We can safely load from dtrace_retained, 11515 * however: the taskq_destroy() at the end of dtrace_detach() will 11516 * block pending our completion. 11517 */ 11518 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11519 #if defined(sun) 11520 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11521 11522 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11523 #endif 11524 (void) dtrace_enabling_match(enab, NULL); 11525 } 11526 11527 mutex_exit(&dtrace_lock); 11528 mutex_exit(&cpu_lock); 11529 } 11530 11531 /* 11532 * If an enabling is to be enabled without having matched probes (that is, if 11533 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11534 * enabling must be _primed_ by creating an ECB for every ECB description. 11535 * This must be done to assure that we know the number of speculations, the 11536 * number of aggregations, the minimum buffer size needed, etc. before we 11537 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11538 * enabling any probes, we create ECBs for every ECB decription, but with a 11539 * NULL probe -- which is exactly what this function does. 
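 *
 * (Priming flows through dtrace_probe_enable() with a NULL probe
 * description, which in turn calls dtrace_ecb_create_enable() with a
 * NULL probe; the resulting ECBs trigger all of the sizing side
 * effects -- speculation counts, aggregation counts, minimum buffer
 * sizes -- without ever calling into a provider.)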
11540 */ 11541 static void 11542 dtrace_enabling_prime(dtrace_state_t *state) 11543 { 11544 dtrace_enabling_t *enab; 11545 int i; 11546 11547 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11548 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11549 11550 if (enab->dten_vstate->dtvs_state != state) 11551 continue; 11552 11553 /* 11554 * We don't want to prime an enabling more than once, lest 11555 * we allow a malicious user to induce resource exhaustion. 11556 * (The ECBs that result from priming an enabling aren't 11557 * leaked -- but they also aren't deallocated until the 11558 * consumer state is destroyed.) 11559 */ 11560 if (enab->dten_primed) 11561 continue; 11562 11563 for (i = 0; i < enab->dten_ndesc; i++) { 11564 enab->dten_current = enab->dten_desc[i]; 11565 (void) dtrace_probe_enable(NULL, enab); 11566 } 11567 11568 enab->dten_primed = 1; 11569 } 11570 } 11571 11572 /* 11573 * Called to indicate that probes should be provided due to retained 11574 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11575 * must take an initial lap through the enabling calling the dtps_provide() 11576 * entry point explicitly to allow for autocreated probes. 11577 */ 11578 static void 11579 dtrace_enabling_provide(dtrace_provider_t *prv) 11580 { 11581 int i, all = 0; 11582 dtrace_probedesc_t desc; 11583 11584 ASSERT(MUTEX_HELD(&dtrace_lock)); 11585 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11586 11587 if (prv == NULL) { 11588 all = 1; 11589 prv = dtrace_provider; 11590 } 11591 11592 do { 11593 dtrace_enabling_t *enab = dtrace_retained; 11594 void *parg = prv->dtpv_arg; 11595 11596 for (; enab != NULL; enab = enab->dten_next) { 11597 for (i = 0; i < enab->dten_ndesc; i++) { 11598 desc = enab->dten_desc[i]->dted_probe; 11599 mutex_exit(&dtrace_lock); 11600 prv->dtpv_pops.dtps_provide(parg, &desc); 11601 mutex_enter(&dtrace_lock); 11602 } 11603 } 11604 } while (all && (prv = prv->dtpv_next) != NULL); 11605 11606 mutex_exit(&dtrace_lock); 11607 dtrace_probe_provide(NULL, all ? NULL : prv); 11608 mutex_enter(&dtrace_lock); 11609 } 11610 11611 /* 11612 * DTrace DOF Functions 11613 */ 11614 /*ARGSUSED*/ 11615 static void 11616 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11617 { 11618 if (dtrace_err_verbose) 11619 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11620 11621 #ifdef DTRACE_ERRDEBUG 11622 dtrace_errdebug(str); 11623 #endif 11624 } 11625 11626 /* 11627 * Create DOF out of a currently enabled state. Right now, we only create 11628 * DOF containing the run-time options -- but this could be expanded to create 11629 * complete DOF representing the enabled state. 
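 *
 * The generated object has a fixed, minimal layout (a summary of the
 * code below, not a normative definition):
 *
 *	dof_hdr_t			header, dofh_secnum = 1
 *	dof_sec_t			DOF_SECT_OPTDESC section header
 *	(pad to an 8-byte boundary)
 *	dof_optdesc_t[DTRACEOPT_MAX]	one entry per run-time option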
11630 */ 11631 static dof_hdr_t * 11632 dtrace_dof_create(dtrace_state_t *state) 11633 { 11634 dof_hdr_t *dof; 11635 dof_sec_t *sec; 11636 dof_optdesc_t *opt; 11637 int i, len = sizeof (dof_hdr_t) + 11638 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11639 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11640 11641 ASSERT(MUTEX_HELD(&dtrace_lock)); 11642 11643 dof = kmem_zalloc(len, KM_SLEEP); 11644 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11645 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11646 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11647 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11648 11649 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11650 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11651 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11652 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11653 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11654 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11655 11656 dof->dofh_flags = 0; 11657 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11658 dof->dofh_secsize = sizeof (dof_sec_t); 11659 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11660 dof->dofh_secoff = sizeof (dof_hdr_t); 11661 dof->dofh_loadsz = len; 11662 dof->dofh_filesz = len; 11663 dof->dofh_pad = 0; 11664 11665 /* 11666 * Fill in the option section header... 11667 */ 11668 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11669 sec->dofs_type = DOF_SECT_OPTDESC; 11670 sec->dofs_align = sizeof (uint64_t); 11671 sec->dofs_flags = DOF_SECF_LOAD; 11672 sec->dofs_entsize = sizeof (dof_optdesc_t); 11673 11674 opt = (dof_optdesc_t *)((uintptr_t)sec + 11675 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11676 11677 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11678 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11679 11680 for (i = 0; i < DTRACEOPT_MAX; i++) { 11681 opt[i].dofo_option = i; 11682 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11683 opt[i].dofo_value = state->dts_options[i]; 11684 } 11685 11686 return (dof); 11687 } 11688 11689 static dof_hdr_t * 11690 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11691 { 11692 dof_hdr_t hdr, *dof; 11693 11694 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11695 11696 /* 11697 * First, we're going to copyin() the sizeof (dof_hdr_t). 11698 */ 11699 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11700 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11701 *errp = EFAULT; 11702 return (NULL); 11703 } 11704 11705 /* 11706 * Now we'll allocate the entire DOF and copy it in -- provided 11707 * that the length isn't outrageous. 
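 *
 * Note that the size checks below are made against the header copied
 * in above, while the full copyin() re-reads user memory; the
 * dofh_loadsz embedded in the returned object is thus taken on faith
 * by callers such as dtrace_dof_destroy().  A hardened variant might
 * re-verify that the re-read header still matches hdr before
 * returning (this is an observation, not a description of existing
 * behavior).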
11708 */ 11709 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11710 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11711 *errp = E2BIG; 11712 return (NULL); 11713 } 11714 11715 if (hdr.dofh_loadsz < sizeof (hdr)) { 11716 dtrace_dof_error(&hdr, "invalid load size"); 11717 *errp = EINVAL; 11718 return (NULL); 11719 } 11720 11721 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11722 11723 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11724 kmem_free(dof, hdr.dofh_loadsz); 11725 *errp = EFAULT; 11726 return (NULL); 11727 } 11728 11729 return (dof); 11730 } 11731 11732 #if !defined(sun) 11733 static __inline uchar_t 11734 dtrace_dof_char(char c) { 11735 switch (c) { 11736 case '0': 11737 case '1': 11738 case '2': 11739 case '3': 11740 case '4': 11741 case '5': 11742 case '6': 11743 case '7': 11744 case '8': 11745 case '9': 11746 return (c - '0'); 11747 case 'A': 11748 case 'B': 11749 case 'C': 11750 case 'D': 11751 case 'E': 11752 case 'F': 11753 return (c - 'A' + 10); 11754 case 'a': 11755 case 'b': 11756 case 'c': 11757 case 'd': 11758 case 'e': 11759 case 'f': 11760 return (c - 'a' + 10); 11761 } 11762 /* Should not reach here. */ 11763 return (0); 11764 } 11765 #endif 11766 11767 static dof_hdr_t * 11768 dtrace_dof_property(const char *name) 11769 { 11770 uchar_t *buf; 11771 uint64_t loadsz; 11772 unsigned int len, i; 11773 dof_hdr_t *dof; 11774 11775 #if defined(sun) 11776 /* 11777 * Unfortunately, array of values in .conf files are always (and 11778 * only) interpreted to be integer arrays. We must read our DOF 11779 * as an integer array, and then squeeze it into a byte array. 11780 */ 11781 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11782 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11783 return (NULL); 11784 11785 for (i = 0; i < len; i++) 11786 buf[i] = (uchar_t)(((int *)buf)[i]); 11787 11788 if (len < sizeof (dof_hdr_t)) { 11789 ddi_prop_free(buf); 11790 dtrace_dof_error(NULL, "truncated header"); 11791 return (NULL); 11792 } 11793 11794 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11795 ddi_prop_free(buf); 11796 dtrace_dof_error(NULL, "truncated DOF"); 11797 return (NULL); 11798 } 11799 11800 if (loadsz >= dtrace_dof_maxsize) { 11801 ddi_prop_free(buf); 11802 dtrace_dof_error(NULL, "oversized DOF"); 11803 return (NULL); 11804 } 11805 11806 dof = kmem_alloc(loadsz, KM_SLEEP); 11807 bcopy(buf, dof, loadsz); 11808 ddi_prop_free(buf); 11809 #else 11810 char *p; 11811 char *p_env; 11812 11813 if ((p_env = getenv(name)) == NULL) 11814 return (NULL); 11815 11816 len = strlen(p_env) / 2; 11817 11818 buf = kmem_alloc(len, KM_SLEEP); 11819 11820 dof = (dof_hdr_t *) buf; 11821 11822 p = p_env; 11823 11824 for (i = 0; i < len; i++) { 11825 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11826 dtrace_dof_char(p[1]); 11827 p += 2; 11828 } 11829 11830 freeenv(p_env); 11831 11832 if (len < sizeof (dof_hdr_t)) { 11833 kmem_free(buf, 0); 11834 dtrace_dof_error(NULL, "truncated header"); 11835 return (NULL); 11836 } 11837 11838 if (len < (loadsz = dof->dofh_loadsz)) { 11839 kmem_free(buf, 0); 11840 dtrace_dof_error(NULL, "truncated DOF"); 11841 return (NULL); 11842 } 11843 11844 if (loadsz >= dtrace_dof_maxsize) { 11845 kmem_free(buf, 0); 11846 dtrace_dof_error(NULL, "oversized DOF"); 11847 return (NULL); 11848 } 11849 #endif 11850 11851 return (dof); 11852 } 11853 11854 static void 11855 dtrace_dof_destroy(dof_hdr_t *dof) 11856 { 11857 kmem_free(dof, dof->dofh_loadsz); 11858 } 11859 11860 /* 11861 * Return the dof_sec_t pointer corresponding to a given section 
index. If the 11862 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11863 * a type other than DOF_SECT_NONE is specified, the header is checked against 11864 * this type and NULL is returned if the types do not match. 11865 */ 11866 static dof_sec_t * 11867 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11868 { 11869 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11870 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11871 11872 if (i >= dof->dofh_secnum) { 11873 dtrace_dof_error(dof, "referenced section index is invalid"); 11874 return (NULL); 11875 } 11876 11877 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11878 dtrace_dof_error(dof, "referenced section is not loadable"); 11879 return (NULL); 11880 } 11881 11882 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11883 dtrace_dof_error(dof, "referenced section is the wrong type"); 11884 return (NULL); 11885 } 11886 11887 return (sec); 11888 } 11889 11890 static dtrace_probedesc_t * 11891 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11892 { 11893 dof_probedesc_t *probe; 11894 dof_sec_t *strtab; 11895 uintptr_t daddr = (uintptr_t)dof; 11896 uintptr_t str; 11897 size_t size; 11898 11899 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11900 dtrace_dof_error(dof, "invalid probe section"); 11901 return (NULL); 11902 } 11903 11904 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11905 dtrace_dof_error(dof, "bad alignment in probe description"); 11906 return (NULL); 11907 } 11908 11909 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11910 dtrace_dof_error(dof, "truncated probe description"); 11911 return (NULL); 11912 } 11913 11914 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11915 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11916 11917 if (strtab == NULL) 11918 return (NULL); 11919 11920 str = daddr + strtab->dofs_offset; 11921 size = strtab->dofs_size; 11922 11923 if (probe->dofp_provider >= strtab->dofs_size) { 11924 dtrace_dof_error(dof, "corrupt probe provider"); 11925 return (NULL); 11926 } 11927 11928 (void) strncpy(desc->dtpd_provider, 11929 (char *)(str + probe->dofp_provider), 11930 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11931 11932 if (probe->dofp_mod >= strtab->dofs_size) { 11933 dtrace_dof_error(dof, "corrupt probe module"); 11934 return (NULL); 11935 } 11936 11937 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11938 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11939 11940 if (probe->dofp_func >= strtab->dofs_size) { 11941 dtrace_dof_error(dof, "corrupt probe function"); 11942 return (NULL); 11943 } 11944 11945 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11946 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11947 11948 if (probe->dofp_name >= strtab->dofs_size) { 11949 dtrace_dof_error(dof, "corrupt probe name"); 11950 return (NULL); 11951 } 11952 11953 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11954 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11955 11956 return (desc); 11957 } 11958 11959 static dtrace_difo_t * 11960 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11961 cred_t *cr) 11962 { 11963 dtrace_difo_t *dp; 11964 size_t ttl = 0; 11965 dof_difohdr_t *dofd; 11966 uintptr_t daddr = (uintptr_t)dof; 11967 size_t max = dtrace_difo_maxsize; 11968 int i, l, n; 11969 11970 static const struct { 11971 int section; 11972 int bufoffs; 11973 int lenoffs; 11974 int 
entsize; 11975 int align; 11976 const char *msg; 11977 } difo[] = { 11978 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11979 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11980 sizeof (dif_instr_t), "multiple DIF sections" }, 11981 11982 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11983 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11984 sizeof (uint64_t), "multiple integer tables" }, 11985 11986 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11987 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11988 sizeof (char), "multiple string tables" }, 11989 11990 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11991 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11992 sizeof (uint_t), "multiple variable tables" }, 11993 11994 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 11995 }; 11996 11997 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11998 dtrace_dof_error(dof, "invalid DIFO header section"); 11999 return (NULL); 12000 } 12001 12002 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12003 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12004 return (NULL); 12005 } 12006 12007 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12008 sec->dofs_size % sizeof (dof_secidx_t)) { 12009 dtrace_dof_error(dof, "bad size in DIFO header"); 12010 return (NULL); 12011 } 12012 12013 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12014 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12015 12016 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12017 dp->dtdo_rtype = dofd->dofd_rtype; 12018 12019 for (l = 0; l < n; l++) { 12020 dof_sec_t *subsec; 12021 void **bufp; 12022 uint32_t *lenp; 12023 12024 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12025 dofd->dofd_links[l])) == NULL) 12026 goto err; /* invalid section link */ 12027 12028 if (ttl + subsec->dofs_size > max) { 12029 dtrace_dof_error(dof, "exceeds maximum size"); 12030 goto err; 12031 } 12032 12033 ttl += subsec->dofs_size; 12034 12035 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12036 if (subsec->dofs_type != difo[i].section) 12037 continue; 12038 12039 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12040 dtrace_dof_error(dof, "section not loaded"); 12041 goto err; 12042 } 12043 12044 if (subsec->dofs_align != difo[i].align) { 12045 dtrace_dof_error(dof, "bad alignment"); 12046 goto err; 12047 } 12048 12049 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12050 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12051 12052 if (*bufp != NULL) { 12053 dtrace_dof_error(dof, difo[i].msg); 12054 goto err; 12055 } 12056 12057 if (difo[i].entsize != subsec->dofs_entsize) { 12058 dtrace_dof_error(dof, "entry size mismatch"); 12059 goto err; 12060 } 12061 12062 if (subsec->dofs_entsize != 0 && 12063 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12064 dtrace_dof_error(dof, "corrupt entry size"); 12065 goto err; 12066 } 12067 12068 *lenp = subsec->dofs_size; 12069 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12070 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12071 *bufp, subsec->dofs_size); 12072 12073 if (subsec->dofs_entsize != 0) 12074 *lenp /= subsec->dofs_entsize; 12075 12076 break; 12077 } 12078 12079 /* 12080 * If we encounter a loadable DIFO sub-section that is not 12081 * known to us, assume this is a broken program and fail. 
12082 */ 12083 if (difo[i].section == DOF_SECT_NONE && 12084 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12085 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12086 goto err; 12087 } 12088 } 12089 12090 if (dp->dtdo_buf == NULL) { 12091 /* 12092 * We can't have a DIF object without DIF text. 12093 */ 12094 dtrace_dof_error(dof, "missing DIF text"); 12095 goto err; 12096 } 12097 12098 /* 12099 * Before we validate the DIF object, run through the variable table 12100 * looking for the strings -- if any of their size are under, we'll set 12101 * their size to be the system-wide default string size. Note that 12102 * this should _not_ happen if the "strsize" option has been set -- 12103 * in this case, the compiler should have set the size to reflect the 12104 * setting of the option. 12105 */ 12106 for (i = 0; i < dp->dtdo_varlen; i++) { 12107 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12108 dtrace_diftype_t *t = &v->dtdv_type; 12109 12110 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12111 continue; 12112 12113 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12114 t->dtdt_size = dtrace_strsize_default; 12115 } 12116 12117 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12118 goto err; 12119 12120 dtrace_difo_init(dp, vstate); 12121 return (dp); 12122 12123 err: 12124 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12125 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12126 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12127 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12128 12129 kmem_free(dp, sizeof (dtrace_difo_t)); 12130 return (NULL); 12131 } 12132 12133 static dtrace_predicate_t * 12134 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12135 cred_t *cr) 12136 { 12137 dtrace_difo_t *dp; 12138 12139 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12140 return (NULL); 12141 12142 return (dtrace_predicate_create(dp)); 12143 } 12144 12145 static dtrace_actdesc_t * 12146 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12147 cred_t *cr) 12148 { 12149 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12150 dof_actdesc_t *desc; 12151 dof_sec_t *difosec; 12152 size_t offs; 12153 uintptr_t daddr = (uintptr_t)dof; 12154 uint64_t arg; 12155 dtrace_actkind_t kind; 12156 12157 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12158 dtrace_dof_error(dof, "invalid action section"); 12159 return (NULL); 12160 } 12161 12162 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12163 dtrace_dof_error(dof, "truncated action description"); 12164 return (NULL); 12165 } 12166 12167 if (sec->dofs_align != sizeof (uint64_t)) { 12168 dtrace_dof_error(dof, "bad alignment in action description"); 12169 return (NULL); 12170 } 12171 12172 if (sec->dofs_size < sec->dofs_entsize) { 12173 dtrace_dof_error(dof, "section entry size exceeds total size"); 12174 return (NULL); 12175 } 12176 12177 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12178 dtrace_dof_error(dof, "bad entry size in action description"); 12179 return (NULL); 12180 } 12181 12182 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12183 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12184 return (NULL); 12185 } 12186 12187 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12188 desc = (dof_actdesc_t *)(daddr + 12189 (uintptr_t)sec->dofs_offset + offs); 12190 kind = (dtrace_actkind_t)desc->dofa_kind; 12191 12192 if (DTRACEACT_ISPRINTFLIKE(kind) && 12193 (kind != 
DTRACEACT_PRINTA || 12194 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12195 dof_sec_t *strtab; 12196 char *str, *fmt; 12197 uint64_t i; 12198 12199 /* 12200 * printf()-like actions must have a format string. 12201 */ 12202 if ((strtab = dtrace_dof_sect(dof, 12203 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12204 goto err; 12205 12206 str = (char *)((uintptr_t)dof + 12207 (uintptr_t)strtab->dofs_offset); 12208 12209 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12210 if (str[i] == '\0') 12211 break; 12212 } 12213 12214 if (i >= strtab->dofs_size) { 12215 dtrace_dof_error(dof, "bogus format string"); 12216 goto err; 12217 } 12218 12219 if (i == desc->dofa_arg) { 12220 dtrace_dof_error(dof, "empty format string"); 12221 goto err; 12222 } 12223 12224 i -= desc->dofa_arg; 12225 fmt = kmem_alloc(i + 1, KM_SLEEP); 12226 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12227 arg = (uint64_t)(uintptr_t)fmt; 12228 } else { 12229 if (kind == DTRACEACT_PRINTA) { 12230 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12231 arg = 0; 12232 } else { 12233 arg = desc->dofa_arg; 12234 } 12235 } 12236 12237 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12238 desc->dofa_uarg, arg); 12239 12240 if (last != NULL) { 12241 last->dtad_next = act; 12242 } else { 12243 first = act; 12244 } 12245 12246 last = act; 12247 12248 if (desc->dofa_difo == DOF_SECIDX_NONE) 12249 continue; 12250 12251 if ((difosec = dtrace_dof_sect(dof, 12252 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12253 goto err; 12254 12255 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12256 12257 if (act->dtad_difo == NULL) 12258 goto err; 12259 } 12260 12261 ASSERT(first != NULL); 12262 return (first); 12263 12264 err: 12265 for (act = first; act != NULL; act = next) { 12266 next = act->dtad_next; 12267 dtrace_actdesc_release(act, vstate); 12268 } 12269 12270 return (NULL); 12271 } 12272 12273 static dtrace_ecbdesc_t * 12274 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12275 cred_t *cr) 12276 { 12277 dtrace_ecbdesc_t *ep; 12278 dof_ecbdesc_t *ecb; 12279 dtrace_probedesc_t *desc; 12280 dtrace_predicate_t *pred = NULL; 12281 12282 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12283 dtrace_dof_error(dof, "truncated ECB description"); 12284 return (NULL); 12285 } 12286 12287 if (sec->dofs_align != sizeof (uint64_t)) { 12288 dtrace_dof_error(dof, "bad alignment in ECB description"); 12289 return (NULL); 12290 } 12291 12292 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12293 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12294 12295 if (sec == NULL) 12296 return (NULL); 12297 12298 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12299 ep->dted_uarg = ecb->dofe_uarg; 12300 desc = &ep->dted_probe; 12301 12302 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12303 goto err; 12304 12305 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12306 if ((sec = dtrace_dof_sect(dof, 12307 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12308 goto err; 12309 12310 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12311 goto err; 12312 12313 ep->dted_pred.dtpdd_predicate = pred; 12314 } 12315 12316 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12317 if ((sec = dtrace_dof_sect(dof, 12318 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12319 goto err; 12320 12321 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12322 12323 if (ep->dted_action == NULL) 12324 goto err; 12325 } 12326 12327 return (ep); 12328 12329 err: 12330 if (pred != NULL) 12331 
dtrace_predicate_release(pred, vstate); 12332 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12333 return (NULL); 12334 } 12335 12336 /* 12337 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12338 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12339 * site of any user SETX relocations to account for load object base address. 12340 * In the future, if we need other relocations, this function can be extended. 12341 */ 12342 static int 12343 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12344 { 12345 uintptr_t daddr = (uintptr_t)dof; 12346 dof_relohdr_t *dofr = 12347 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12348 dof_sec_t *ss, *rs, *ts; 12349 dof_relodesc_t *r; 12350 uint_t i, n; 12351 12352 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12353 sec->dofs_align != sizeof (dof_secidx_t)) { 12354 dtrace_dof_error(dof, "invalid relocation header"); 12355 return (-1); 12356 } 12357 12358 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12359 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12360 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12361 12362 if (ss == NULL || rs == NULL || ts == NULL) 12363 return (-1); /* dtrace_dof_error() has been called already */ 12364 12365 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12366 rs->dofs_align != sizeof (uint64_t)) { 12367 dtrace_dof_error(dof, "invalid relocation section"); 12368 return (-1); 12369 } 12370 12371 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12372 n = rs->dofs_size / rs->dofs_entsize; 12373 12374 for (i = 0; i < n; i++) { 12375 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12376 12377 switch (r->dofr_type) { 12378 case DOF_RELO_NONE: 12379 break; 12380 case DOF_RELO_SETX: 12381 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12382 sizeof (uint64_t) > ts->dofs_size) { 12383 dtrace_dof_error(dof, "bad relocation offset"); 12384 return (-1); 12385 } 12386 12387 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12388 dtrace_dof_error(dof, "misaligned setx relo"); 12389 return (-1); 12390 } 12391 12392 *(uint64_t *)taddr += ubase; 12393 break; 12394 default: 12395 dtrace_dof_error(dof, "invalid relocation type"); 12396 return (-1); 12397 } 12398 12399 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12400 } 12401 12402 return (0); 12403 } 12404 12405 /* 12406 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12407 * header: it should be at the front of a memory region that is at least 12408 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12409 * size. It need not be validated in any other way. 12410 */ 12411 static int 12412 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12413 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12414 { 12415 uint64_t len = dof->dofh_loadsz, seclen; 12416 uintptr_t daddr = (uintptr_t)dof; 12417 dtrace_ecbdesc_t *ep; 12418 dtrace_enabling_t *enab; 12419 uint_t i; 12420 12421 ASSERT(MUTEX_HELD(&dtrace_lock)); 12422 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12423 12424 /* 12425 * Check the DOF header identification bytes. In addition to checking 12426 * valid settings, we also verify that unused bits/bytes are zeroed so 12427 * we can use them later without fear of regressing existing binaries. 
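
/*
 * Illustrative sketch (not part of this file's build): the shape of the
 * ident-byte validation performed just below.  A header is accepted only
 * if its magic string matches and every byte reserved for future use is
 * still zero, which is what lets those bytes be given meaning later
 * without breaking existing binaries.  The constants and layout here are
 * hypothetical stand-ins, not the real DOF definitions.
 */
#include <stdint.h>
#include <string.h>

#define EX_MAG_STRING   "XDOF"  /* hypothetical magic string */
#define EX_MAG_STRLEN   4
#define EX_ID_PAD       8       /* first reserved ident byte */
#define EX_ID_SIZE      16      /* total number of ident bytes */

static int
ex_ident_ok(const uint8_t ident[EX_ID_SIZE])
{
    int i;

    if (memcmp(ident, EX_MAG_STRING, EX_MAG_STRLEN) != 0)
        return (0);     /* magic string mismatch */

    for (i = EX_ID_PAD; i < EX_ID_SIZE; i++) {
        if (ident[i] != 0)
            return (0); /* reserved byte already in use */
    }

    return (1);
}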
12428 */ 12429 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12430 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12431 dtrace_dof_error(dof, "DOF magic string mismatch"); 12432 return (-1); 12433 } 12434 12435 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12436 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12437 dtrace_dof_error(dof, "DOF has invalid data model"); 12438 return (-1); 12439 } 12440 12441 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12442 dtrace_dof_error(dof, "DOF encoding mismatch"); 12443 return (-1); 12444 } 12445 12446 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12447 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12448 dtrace_dof_error(dof, "DOF version mismatch"); 12449 return (-1); 12450 } 12451 12452 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12453 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12454 return (-1); 12455 } 12456 12457 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12458 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12459 return (-1); 12460 } 12461 12462 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12463 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12464 return (-1); 12465 } 12466 12467 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12468 if (dof->dofh_ident[i] != 0) { 12469 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12470 return (-1); 12471 } 12472 } 12473 12474 if (dof->dofh_flags & ~DOF_FL_VALID) { 12475 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12476 return (-1); 12477 } 12478 12479 if (dof->dofh_secsize == 0) { 12480 dtrace_dof_error(dof, "zero section header size"); 12481 return (-1); 12482 } 12483 12484 /* 12485 * Check that the section headers don't exceed the amount of DOF 12486 * data. Note that we cast the section size and number of sections 12487 * to uint64_t's to prevent possible overflow in the multiplication. 12488 */ 12489 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12490 12491 if (dof->dofh_secoff > len || seclen > len || 12492 dof->dofh_secoff + seclen > len) { 12493 dtrace_dof_error(dof, "truncated section headers"); 12494 return (-1); 12495 } 12496 12497 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12498 dtrace_dof_error(dof, "misaligned section headers"); 12499 return (-1); 12500 } 12501 12502 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12503 dtrace_dof_error(dof, "misaligned section size"); 12504 return (-1); 12505 } 12506 12507 /* 12508 * Take an initial pass through the section headers to be sure that 12509 * the headers don't have stray offsets. If the 'noprobes' flag is 12510 * set, do not permit sections relating to providers, probes, or args. 
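
/*
 * Illustrative sketch (not part of this file's build): the arithmetic
 * behind the first-pass section checks that follow.  A section is
 * rejected when its alignment is not a non-zero power of two, when its
 * offset is not aligned, or when it does not lie entirely within the
 * loadable image; the bounds test is written in three parts so that an
 * offset + size overflow cannot slip past it.  Names are hypothetical.
 */
#include <stdint.h>

static int
ex_section_ok(uint64_t offset, uint64_t size, uint64_t align, uint64_t limit)
{
    if (align == 0 || (align & (align - 1)) != 0)
        return (0);     /* bad section alignment */

    if ((offset & (align - 1)) != 0)
        return (0);     /* misaligned section */

    if (offset > limit || size > limit || offset + size > limit)
        return (0);     /* section escapes the image */

    return (1);
}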
12511 */ 12512 for (i = 0; i < dof->dofh_secnum; i++) { 12513 dof_sec_t *sec = (dof_sec_t *)(daddr + 12514 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12515 12516 if (noprobes) { 12517 switch (sec->dofs_type) { 12518 case DOF_SECT_PROVIDER: 12519 case DOF_SECT_PROBES: 12520 case DOF_SECT_PRARGS: 12521 case DOF_SECT_PROFFS: 12522 dtrace_dof_error(dof, "illegal sections " 12523 "for enabling"); 12524 return (-1); 12525 } 12526 } 12527 12528 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12529 continue; /* just ignore non-loadable sections */ 12530 12531 if (sec->dofs_align & (sec->dofs_align - 1)) { 12532 dtrace_dof_error(dof, "bad section alignment"); 12533 return (-1); 12534 } 12535 12536 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12537 dtrace_dof_error(dof, "misaligned section"); 12538 return (-1); 12539 } 12540 12541 if (sec->dofs_offset > len || sec->dofs_size > len || 12542 sec->dofs_offset + sec->dofs_size > len) { 12543 dtrace_dof_error(dof, "corrupt section header"); 12544 return (-1); 12545 } 12546 12547 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12548 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12549 dtrace_dof_error(dof, "non-terminating string table"); 12550 return (-1); 12551 } 12552 } 12553 12554 /* 12555 * Take a second pass through the sections and locate and perform any 12556 * relocations that are present. We do this after the first pass to 12557 * be sure that all sections have had their headers validated. 12558 */ 12559 for (i = 0; i < dof->dofh_secnum; i++) { 12560 dof_sec_t *sec = (dof_sec_t *)(daddr + 12561 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12562 12563 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12564 continue; /* skip sections that are not loadable */ 12565 12566 switch (sec->dofs_type) { 12567 case DOF_SECT_URELHDR: 12568 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12569 return (-1); 12570 break; 12571 } 12572 } 12573 12574 if ((enab = *enabp) == NULL) 12575 enab = *enabp = dtrace_enabling_create(vstate); 12576 12577 for (i = 0; i < dof->dofh_secnum; i++) { 12578 dof_sec_t *sec = (dof_sec_t *)(daddr + 12579 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12580 12581 if (sec->dofs_type != DOF_SECT_ECBDESC) 12582 continue; 12583 12584 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12585 dtrace_enabling_destroy(enab); 12586 *enabp = NULL; 12587 return (-1); 12588 } 12589 12590 dtrace_enabling_add(enab, ep); 12591 } 12592 12593 return (0); 12594 } 12595 12596 /* 12597 * Process DOF for any options. This routine assumes that the DOF has been 12598 * at least processed by dtrace_dof_slurp(). 
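
/*
 * Illustrative sketch (not part of this file's build): the fixed-stride
 * record walk used by dtrace_dof_options() below.  Records are 'entsize'
 * bytes apart within a 'size'-byte section, and the entry size must be at
 * least as large as the record being read.  The record type is a
 * hypothetical stand-in for dof_optdesc_t.
 */
#include <stdint.h>
#include <stddef.h>

struct ex_optdesc {
    uint32_t exo_option;    /* option identifier */
    int64_t exo_value;      /* option value */
};

static int
ex_count_options(const char *sec, size_t size, size_t entsize)
{
    size_t offs;
    int n = 0;

    if (entsize < sizeof (struct ex_optdesc))
        return (-1);        /* bad option entry size */

    for (offs = 0; offs + entsize <= size; offs += entsize) {
        const struct ex_optdesc *d =
            (const struct ex_optdesc *)(const void *)(sec + offs);

        (void)d->exo_value; /* a real walk would act on each record */
        n++;
    }

    return (n);
}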
12599 */ 12600 static int 12601 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12602 { 12603 int i, rval; 12604 uint32_t entsize; 12605 size_t offs; 12606 dof_optdesc_t *desc; 12607 12608 for (i = 0; i < dof->dofh_secnum; i++) { 12609 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12610 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12611 12612 if (sec->dofs_type != DOF_SECT_OPTDESC) 12613 continue; 12614 12615 if (sec->dofs_align != sizeof (uint64_t)) { 12616 dtrace_dof_error(dof, "bad alignment in " 12617 "option description"); 12618 return (EINVAL); 12619 } 12620 12621 if ((entsize = sec->dofs_entsize) == 0) { 12622 dtrace_dof_error(dof, "zeroed option entry size"); 12623 return (EINVAL); 12624 } 12625 12626 if (entsize < sizeof (dof_optdesc_t)) { 12627 dtrace_dof_error(dof, "bad option entry size"); 12628 return (EINVAL); 12629 } 12630 12631 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12632 desc = (dof_optdesc_t *)((uintptr_t)dof + 12633 (uintptr_t)sec->dofs_offset + offs); 12634 12635 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12636 dtrace_dof_error(dof, "non-zero option string"); 12637 return (EINVAL); 12638 } 12639 12640 if (desc->dofo_value == DTRACEOPT_UNSET) { 12641 dtrace_dof_error(dof, "unset option"); 12642 return (EINVAL); 12643 } 12644 12645 if ((rval = dtrace_state_option(state, 12646 desc->dofo_option, desc->dofo_value)) != 0) { 12647 dtrace_dof_error(dof, "rejected option"); 12648 return (rval); 12649 } 12650 } 12651 } 12652 12653 return (0); 12654 } 12655 12656 /* 12657 * DTrace Consumer State Functions 12658 */ 12659 static int 12660 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12661 { 12662 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12663 void *base; 12664 uintptr_t limit; 12665 dtrace_dynvar_t *dvar, *next, *start; 12666 int i; 12667 12668 ASSERT(MUTEX_HELD(&dtrace_lock)); 12669 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12670 12671 bzero(dstate, sizeof (dtrace_dstate_t)); 12672 12673 if ((dstate->dtds_chunksize = chunksize) == 0) 12674 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12675 12676 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12677 size = min; 12678 12679 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12680 return (ENOMEM); 12681 12682 dstate->dtds_size = size; 12683 dstate->dtds_base = base; 12684 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12685 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12686 12687 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12688 12689 if (hashsize != 1 && (hashsize & 1)) 12690 hashsize--; 12691 12692 dstate->dtds_hashsize = hashsize; 12693 dstate->dtds_hash = dstate->dtds_base; 12694 12695 /* 12696 * Set all of our hash buckets to point to the single sink, and (if 12697 * it hasn't already been set), set the sink's hash value to be the 12698 * sink sentinel value. The sink is needed for dynamic variable 12699 * lookups to know that they have iterated over an entire, valid hash 12700 * chain. 12701 */ 12702 for (i = 0; i < hashsize; i++) 12703 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12704 12705 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12706 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12707 12708 /* 12709 * Determine number of active CPUs. Divide free list evenly among 12710 * active CPUs. 
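
/*
 * Illustrative sketch (not part of this file's build): carving a single
 * region into equal per-CPU shares and chaining each share into a free
 * list of fixed-size chunks, as dtrace_dstate_init() does below for the
 * dynamic variable space.  The "give the last CPU whatever is left over"
 * rule and all error handling are simplified away; names are hypothetical.
 */
#include <stddef.h>

struct ex_chunk {
    struct ex_chunk *next;  /* payload would follow */
};

static void
ex_carve(char *base, size_t size, size_t chunksize, size_t ncpu,
    struct ex_chunk **freelist)
{
    size_t per = (size / ncpu / chunksize) * chunksize;
    size_t c;

    for (c = 0; c < ncpu; c++) {
        char *p = base + c * per;
        char *limit = p + per;
        struct ex_chunk *head = NULL, **tail = &head;

        while (p + chunksize <= limit) {
            *tail = (struct ex_chunk *)(void *)p;
            tail = &(*tail)->next;
            p += chunksize;
        }

        *tail = NULL;
        freelist[c] = head;
    }
}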
12711 */ 12712 start = (dtrace_dynvar_t *) 12713 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12714 limit = (uintptr_t)base + size; 12715 12716 maxper = (limit - (uintptr_t)start) / NCPU; 12717 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12718 12719 #if !defined(sun) 12720 CPU_FOREACH(i) { 12721 #else 12722 for (i = 0; i < NCPU; i++) { 12723 #endif 12724 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12725 12726 /* 12727 * If we don't even have enough chunks to make it once through 12728 * NCPUs, we're just going to allocate everything to the first 12729 * CPU. And if we're on the last CPU, we're going to allocate 12730 * whatever is left over. In either case, we set the limit to 12731 * be the limit of the dynamic variable space. 12732 */ 12733 if (maxper == 0 || i == NCPU - 1) { 12734 limit = (uintptr_t)base + size; 12735 start = NULL; 12736 } else { 12737 limit = (uintptr_t)start + maxper; 12738 start = (dtrace_dynvar_t *)limit; 12739 } 12740 12741 ASSERT(limit <= (uintptr_t)base + size); 12742 12743 for (;;) { 12744 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12745 dstate->dtds_chunksize); 12746 12747 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12748 break; 12749 12750 dvar->dtdv_next = next; 12751 dvar = next; 12752 } 12753 12754 if (maxper == 0) 12755 break; 12756 } 12757 12758 return (0); 12759 } 12760 12761 static void 12762 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12763 { 12764 ASSERT(MUTEX_HELD(&cpu_lock)); 12765 12766 if (dstate->dtds_base == NULL) 12767 return; 12768 12769 kmem_free(dstate->dtds_base, dstate->dtds_size); 12770 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12771 } 12772 12773 static void 12774 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12775 { 12776 /* 12777 * Logical XOR, where are you? 12778 */ 12779 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12780 12781 if (vstate->dtvs_nglobals > 0) { 12782 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12783 sizeof (dtrace_statvar_t *)); 12784 } 12785 12786 if (vstate->dtvs_ntlocals > 0) { 12787 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12788 sizeof (dtrace_difv_t)); 12789 } 12790 12791 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12792 12793 if (vstate->dtvs_nlocals > 0) { 12794 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12795 sizeof (dtrace_statvar_t *)); 12796 } 12797 } 12798 12799 #if defined(sun) 12800 static void 12801 dtrace_state_clean(dtrace_state_t *state) 12802 { 12803 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12804 return; 12805 12806 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12807 dtrace_speculation_clean(state); 12808 } 12809 12810 static void 12811 dtrace_state_deadman(dtrace_state_t *state) 12812 { 12813 hrtime_t now; 12814 12815 dtrace_sync(); 12816 12817 now = dtrace_gethrtime(); 12818 12819 if (state != dtrace_anon.dta_state && 12820 now - state->dts_laststatus >= dtrace_deadman_user) 12821 return; 12822 12823 /* 12824 * We must be sure that dts_alive never appears to be less than the 12825 * value upon entry to dtrace_state_deadman(), and because we lack a 12826 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12827 * store INT64_MAX to it, followed by a memory barrier, followed by 12828 * the new value. This assures that dts_alive never appears to be 12829 * less than its true value, regardless of the order in which the 12830 * stores to the underlying storage are issued. 
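
/*
 * Illustrative sketch (not part of this file's build): the "never appears
 * to decrease" update described above, using C11 atomics in place of
 * dtrace_membar_producer().  The maximum value is stored first, a store
 * barrier is issued, and only then is the new timestamp written, so a
 * concurrent reader observes the old value, INT64_MAX, or the new value --
 * never anything smaller than the truth.
 */
#include <stdint.h>
#include <stdatomic.h>

static _Atomic int64_t ex_alive;    /* one writer, many readers */

static void
ex_update_alive(int64_t now)
{
    atomic_store_explicit(&ex_alive, INT64_MAX, memory_order_relaxed);
    atomic_thread_fence(memory_order_release); /* producer barrier */
    atomic_store_explicit(&ex_alive, now, memory_order_relaxed);
}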
12831 */ 12832 state->dts_alive = INT64_MAX; 12833 dtrace_membar_producer(); 12834 state->dts_alive = now; 12835 } 12836 #else 12837 static void 12838 dtrace_state_clean(void *arg) 12839 { 12840 dtrace_state_t *state = arg; 12841 dtrace_optval_t *opt = state->dts_options; 12842 12843 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12844 return; 12845 12846 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12847 dtrace_speculation_clean(state); 12848 12849 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 12850 dtrace_state_clean, state); 12851 } 12852 12853 static void 12854 dtrace_state_deadman(void *arg) 12855 { 12856 dtrace_state_t *state = arg; 12857 hrtime_t now; 12858 12859 dtrace_sync(); 12860 12861 dtrace_debug_output(); 12862 12863 now = dtrace_gethrtime(); 12864 12865 if (state != dtrace_anon.dta_state && 12866 now - state->dts_laststatus >= dtrace_deadman_user) 12867 return; 12868 12869 /* 12870 * We must be sure that dts_alive never appears to be less than the 12871 * value upon entry to dtrace_state_deadman(), and because we lack a 12872 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12873 * store INT64_MAX to it, followed by a memory barrier, followed by 12874 * the new value. This assures that dts_alive never appears to be 12875 * less than its true value, regardless of the order in which the 12876 * stores to the underlying storage are issued. 12877 */ 12878 state->dts_alive = INT64_MAX; 12879 dtrace_membar_producer(); 12880 state->dts_alive = now; 12881 12882 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 12883 dtrace_state_deadman, state); 12884 } 12885 #endif 12886 12887 static dtrace_state_t * 12888 #if defined(sun) 12889 dtrace_state_create(dev_t *devp, cred_t *cr) 12890 #else 12891 dtrace_state_create(struct cdev *dev) 12892 #endif 12893 { 12894 #if defined(sun) 12895 minor_t minor; 12896 major_t major; 12897 #else 12898 cred_t *cr = NULL; 12899 int m = 0; 12900 #endif 12901 char c[30]; 12902 dtrace_state_t *state; 12903 dtrace_optval_t *opt; 12904 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12905 12906 ASSERT(MUTEX_HELD(&dtrace_lock)); 12907 ASSERT(MUTEX_HELD(&cpu_lock)); 12908 12909 #if defined(sun) 12910 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12911 VM_BESTFIT | VM_SLEEP); 12912 12913 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12914 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12915 return (NULL); 12916 } 12917 12918 state = ddi_get_soft_state(dtrace_softstate, minor); 12919 #else 12920 if (dev != NULL) { 12921 cr = dev->si_cred; 12922 m = dev2unit(dev); 12923 } 12924 12925 /* Allocate memory for the state. */ 12926 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12927 #endif 12928 12929 state->dts_epid = DTRACE_EPIDNONE + 1; 12930 12931 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12932 #if defined(sun) 12933 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12934 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12935 12936 if (devp != NULL) { 12937 major = getemajor(*devp); 12938 } else { 12939 major = ddi_driver_major(dtrace_devi); 12940 } 12941 12942 state->dts_dev = makedevice(major, minor); 12943 12944 if (devp != NULL) 12945 *devp = state->dts_dev; 12946 #else 12947 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 12948 state->dts_dev = dev; 12949 #endif 12950 12951 /* 12952 * We allocate NCPU buffers. 
On the one hand, this can be quite 12953 * a bit of memory per instance (nearly 36K on a Starcat). On the 12954 * other hand, it saves an additional memory reference in the probe 12955 * path. 12956 */ 12957 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12958 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12959 12960 #if defined(sun) 12961 state->dts_cleaner = CYCLIC_NONE; 12962 state->dts_deadman = CYCLIC_NONE; 12963 #else 12964 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 12965 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 12966 #endif 12967 state->dts_vstate.dtvs_state = state; 12968 12969 for (i = 0; i < DTRACEOPT_MAX; i++) 12970 state->dts_options[i] = DTRACEOPT_UNSET; 12971 12972 /* 12973 * Set the default options. 12974 */ 12975 opt = state->dts_options; 12976 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12977 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12978 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12979 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12980 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12981 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12982 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12983 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12984 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12985 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12986 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12987 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12988 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12989 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12990 12991 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12992 12993 /* 12994 * Depending on the user credentials, we set flag bits which alter probe 12995 * visibility or the amount of destructiveness allowed. In the case of 12996 * actual anonymous tracing, or the possession of all privileges, all of 12997 * the normal checks are bypassed. 12998 */ 12999 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13000 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13001 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13002 } else { 13003 /* 13004 * Set up the credentials for this instantiation. We take a 13005 * hold on the credential to prevent it from disappearing on 13006 * us; this in turn prevents the zone_t referenced by this 13007 * credential from disappearing. This means that we can 13008 * examine the credential and the zone from probe context. 13009 */ 13010 crhold(cr); 13011 state->dts_cred.dcr_cred = cr; 13012 13013 /* 13014 * CRA_PROC means "we have *some* privilege for dtrace" and 13015 * unlocks the use of variables like pid, zonename, etc. 13016 */ 13017 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13018 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13019 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13020 } 13021 13022 /* 13023 * dtrace_user allows use of syscall and profile providers. 13024 * If the user also has proc_owner and/or proc_zone, we 13025 * extend the scope to include additional visibility and 13026 * destructive power. 
13027 */ 13028 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13029 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13030 state->dts_cred.dcr_visible |= 13031 DTRACE_CRV_ALLPROC; 13032 13033 state->dts_cred.dcr_action |= 13034 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13035 } 13036 13037 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13038 state->dts_cred.dcr_visible |= 13039 DTRACE_CRV_ALLZONE; 13040 13041 state->dts_cred.dcr_action |= 13042 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13043 } 13044 13045 /* 13046 * If we have all privs in whatever zone this is, 13047 * we can do destructive things to processes which 13048 * have altered credentials. 13049 */ 13050 #if defined(sun) 13051 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13052 cr->cr_zone->zone_privset)) { 13053 state->dts_cred.dcr_action |= 13054 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13055 } 13056 #endif 13057 } 13058 13059 /* 13060 * Holding the dtrace_kernel privilege also implies that 13061 * the user has the dtrace_user privilege from a visibility 13062 * perspective. But without further privileges, some 13063 * destructive actions are not available. 13064 */ 13065 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13066 /* 13067 * Make all probes in all zones visible. However, 13068 * this doesn't mean that all actions become available 13069 * to all zones. 13070 */ 13071 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13072 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13073 13074 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13075 DTRACE_CRA_PROC; 13076 /* 13077 * Holding proc_owner means that destructive actions 13078 * for *this* zone are allowed. 13079 */ 13080 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13081 state->dts_cred.dcr_action |= 13082 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13083 13084 /* 13085 * Holding proc_zone means that destructive actions 13086 * for this user/group ID in all zones is allowed. 13087 */ 13088 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13089 state->dts_cred.dcr_action |= 13090 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13091 13092 #if defined(sun) 13093 /* 13094 * If we have all privs in whatever zone this is, 13095 * we can do destructive things to processes which 13096 * have altered credentials. 13097 */ 13098 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13099 cr->cr_zone->zone_privset)) { 13100 state->dts_cred.dcr_action |= 13101 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13102 } 13103 #endif 13104 } 13105 13106 /* 13107 * Holding the dtrace_proc privilege gives control over fasttrap 13108 * and pid providers. We need to grant wider destructive 13109 * privileges in the event that the user has proc_owner and/or 13110 * proc_zone. 
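
/*
 * Illustrative sketch (not part of this file's build): the general shape
 * of the credential checks above and below -- each privilege the consumer
 * holds ORs additional visibility and destructive-action bits into the
 * consumer state.  The privilege and flag names here are hypothetical,
 * not the real DTrace definitions.
 */
#include <stdint.h>

#define EX_PRIV_PROC_OWNER  0x01
#define EX_PRIV_PROC_ZONE   0x02

#define EX_ACT_DESTRUCTIVE_ALLUSER  0x10
#define EX_ACT_DESTRUCTIVE_ALLZONE  0x20

static uint32_t
ex_action_bits(uint32_t privs)
{
    uint32_t action = 0;

    if (privs & EX_PRIV_PROC_OWNER)
        action |= EX_ACT_DESTRUCTIVE_ALLUSER;

    if (privs & EX_PRIV_PROC_ZONE)
        action |= EX_ACT_DESTRUCTIVE_ALLZONE;

    return (action);
}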
13111 */ 13112 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13113 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13114 state->dts_cred.dcr_action |= 13115 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13116 13117 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13118 state->dts_cred.dcr_action |= 13119 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13120 } 13121 } 13122 13123 return (state); 13124 } 13125 13126 static int 13127 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13128 { 13129 dtrace_optval_t *opt = state->dts_options, size; 13130 processorid_t cpu = 0;; 13131 int flags = 0, rval; 13132 13133 ASSERT(MUTEX_HELD(&dtrace_lock)); 13134 ASSERT(MUTEX_HELD(&cpu_lock)); 13135 ASSERT(which < DTRACEOPT_MAX); 13136 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13137 (state == dtrace_anon.dta_state && 13138 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13139 13140 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13141 return (0); 13142 13143 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13144 cpu = opt[DTRACEOPT_CPU]; 13145 13146 if (which == DTRACEOPT_SPECSIZE) 13147 flags |= DTRACEBUF_NOSWITCH; 13148 13149 if (which == DTRACEOPT_BUFSIZE) { 13150 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13151 flags |= DTRACEBUF_RING; 13152 13153 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13154 flags |= DTRACEBUF_FILL; 13155 13156 if (state != dtrace_anon.dta_state || 13157 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13158 flags |= DTRACEBUF_INACTIVE; 13159 } 13160 13161 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13162 /* 13163 * The size must be 8-byte aligned. If the size is not 8-byte 13164 * aligned, drop it down by the difference. 13165 */ 13166 if (size & (sizeof (uint64_t) - 1)) 13167 size -= size & (sizeof (uint64_t) - 1); 13168 13169 if (size < state->dts_reserve) { 13170 /* 13171 * Buffers always must be large enough to accommodate 13172 * their prereserved space. We return E2BIG instead 13173 * of ENOMEM in this case to allow for user-level 13174 * software to differentiate the cases. 13175 */ 13176 return (E2BIG); 13177 } 13178 13179 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13180 13181 if (rval != ENOMEM) { 13182 opt[which] = size; 13183 return (rval); 13184 } 13185 13186 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13187 return (rval); 13188 } 13189 13190 return (ENOMEM); 13191 } 13192 13193 static int 13194 dtrace_state_buffers(dtrace_state_t *state) 13195 { 13196 dtrace_speculation_t *spec = state->dts_speculations; 13197 int rval, i; 13198 13199 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13200 DTRACEOPT_BUFSIZE)) != 0) 13201 return (rval); 13202 13203 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13204 DTRACEOPT_AGGSIZE)) != 0) 13205 return (rval); 13206 13207 for (i = 0; i < state->dts_nspeculations; i++) { 13208 if ((rval = dtrace_state_buffer(state, 13209 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13210 return (rval); 13211 } 13212 13213 return (0); 13214 } 13215 13216 static void 13217 dtrace_state_prereserve(dtrace_state_t *state) 13218 { 13219 dtrace_ecb_t *ecb; 13220 dtrace_probe_t *probe; 13221 13222 state->dts_reserve = 0; 13223 13224 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13225 return; 13226 13227 /* 13228 * If our buffer policy is a "fill" buffer policy, we need to set the 13229 * prereserved space to be the space required by the END probes. 
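
/*
 * Illustrative sketch (not part of this file's build): the sizing policy
 * of dtrace_state_buffer() above -- round the request down to an 8-byte
 * multiple, refuse anything smaller than the prereserved space with E2BIG
 * (so user level can tell it apart from ENOMEM), and otherwise halve the
 * request until an allocation succeeds.  The allocator callback is a
 * hypothetical stand-in for dtrace_buffer_alloc().
 */
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

typedef int (*ex_alloc_fn)(size_t); /* returns 0 or ENOMEM */

static int
ex_size_buffer(size_t request, size_t reserve, ex_alloc_fn try_alloc,
    size_t *chosen)
{
    size_t size;

    for (size = request; size >= sizeof (uint64_t); size >>= 1) {
        size &= ~(sizeof (uint64_t) - 1);   /* 8-byte multiple */

        if (size < reserve)
            return (E2BIG);     /* can't cover the reserve */

        if (try_alloc(size) == 0) {
            *chosen = size;
            return (0);
        }
    }

    return (ENOMEM);
}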
13230 */ 13231 probe = dtrace_probes[dtrace_probeid_end - 1]; 13232 ASSERT(probe != NULL); 13233 13234 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13235 if (ecb->dte_state != state) 13236 continue; 13237 13238 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13239 } 13240 } 13241 13242 static int 13243 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13244 { 13245 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13246 dtrace_speculation_t *spec; 13247 dtrace_buffer_t *buf; 13248 #if defined(sun) 13249 cyc_handler_t hdlr; 13250 cyc_time_t when; 13251 #endif 13252 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13253 dtrace_icookie_t cookie; 13254 13255 mutex_enter(&cpu_lock); 13256 mutex_enter(&dtrace_lock); 13257 13258 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13259 rval = EBUSY; 13260 goto out; 13261 } 13262 13263 /* 13264 * Before we can perform any checks, we must prime all of the 13265 * retained enablings that correspond to this state. 13266 */ 13267 dtrace_enabling_prime(state); 13268 13269 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13270 rval = EACCES; 13271 goto out; 13272 } 13273 13274 dtrace_state_prereserve(state); 13275 13276 /* 13277 * Now we want to do is try to allocate our speculations. 13278 * We do not automatically resize the number of speculations; if 13279 * this fails, we will fail the operation. 13280 */ 13281 nspec = opt[DTRACEOPT_NSPEC]; 13282 ASSERT(nspec != DTRACEOPT_UNSET); 13283 13284 if (nspec > INT_MAX) { 13285 rval = ENOMEM; 13286 goto out; 13287 } 13288 13289 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13290 13291 if (spec == NULL) { 13292 rval = ENOMEM; 13293 goto out; 13294 } 13295 13296 state->dts_speculations = spec; 13297 state->dts_nspeculations = (int)nspec; 13298 13299 for (i = 0; i < nspec; i++) { 13300 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13301 rval = ENOMEM; 13302 goto err; 13303 } 13304 13305 spec[i].dtsp_buffer = buf; 13306 } 13307 13308 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13309 if (dtrace_anon.dta_state == NULL) { 13310 rval = ENOENT; 13311 goto out; 13312 } 13313 13314 if (state->dts_necbs != 0) { 13315 rval = EALREADY; 13316 goto out; 13317 } 13318 13319 state->dts_anon = dtrace_anon_grab(); 13320 ASSERT(state->dts_anon != NULL); 13321 state = state->dts_anon; 13322 13323 /* 13324 * We want "grabanon" to be set in the grabbed state, so we'll 13325 * copy that option value from the grabbing state into the 13326 * grabbed state. 13327 */ 13328 state->dts_options[DTRACEOPT_GRABANON] = 13329 opt[DTRACEOPT_GRABANON]; 13330 13331 *cpu = dtrace_anon.dta_beganon; 13332 13333 /* 13334 * If the anonymous state is active (as it almost certainly 13335 * is if the anonymous enabling ultimately matched anything), 13336 * we don't allow any further option processing -- but we 13337 * don't return failure. 13338 */ 13339 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13340 goto out; 13341 } 13342 13343 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13344 opt[DTRACEOPT_AGGSIZE] != 0) { 13345 if (state->dts_aggregations == NULL) { 13346 /* 13347 * We're not going to create an aggregation buffer 13348 * because we don't have any ECBs that contain 13349 * aggregations -- set this option to 0. 13350 */ 13351 opt[DTRACEOPT_AGGSIZE] = 0; 13352 } else { 13353 /* 13354 * If we have an aggregation buffer, we must also have 13355 * a buffer to use as scratch. 
13356 */ 13357 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13358 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13359 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13360 } 13361 } 13362 } 13363 13364 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13365 opt[DTRACEOPT_SPECSIZE] != 0) { 13366 if (!state->dts_speculates) { 13367 /* 13368 * We're not going to create speculation buffers 13369 * because we don't have any ECBs that actually 13370 * speculate -- set the speculation size to 0. 13371 */ 13372 opt[DTRACEOPT_SPECSIZE] = 0; 13373 } 13374 } 13375 13376 /* 13377 * The bare minimum size for any buffer that we're actually going to 13378 * do anything to is sizeof (uint64_t). 13379 */ 13380 sz = sizeof (uint64_t); 13381 13382 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13383 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13384 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13385 /* 13386 * A buffer size has been explicitly set to 0 (or to a size 13387 * that will be adjusted to 0) and we need the space -- we 13388 * need to return failure. We return ENOSPC to differentiate 13389 * it from failing to allocate a buffer due to failure to meet 13390 * the reserve (for which we return E2BIG). 13391 */ 13392 rval = ENOSPC; 13393 goto out; 13394 } 13395 13396 if ((rval = dtrace_state_buffers(state)) != 0) 13397 goto err; 13398 13399 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13400 sz = dtrace_dstate_defsize; 13401 13402 do { 13403 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13404 13405 if (rval == 0) 13406 break; 13407 13408 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13409 goto err; 13410 } while (sz >>= 1); 13411 13412 opt[DTRACEOPT_DYNVARSIZE] = sz; 13413 13414 if (rval != 0) 13415 goto err; 13416 13417 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13418 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13419 13420 if (opt[DTRACEOPT_CLEANRATE] == 0) 13421 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13422 13423 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13424 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13425 13426 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13427 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13428 13429 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13430 #if defined(sun) 13431 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13432 hdlr.cyh_arg = state; 13433 hdlr.cyh_level = CY_LOW_LEVEL; 13434 13435 when.cyt_when = 0; 13436 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13437 13438 state->dts_cleaner = cyclic_add(&hdlr, &when); 13439 13440 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13441 hdlr.cyh_arg = state; 13442 hdlr.cyh_level = CY_LOW_LEVEL; 13443 13444 when.cyt_when = 0; 13445 when.cyt_interval = dtrace_deadman_interval; 13446 13447 state->dts_deadman = cyclic_add(&hdlr, &when); 13448 #else 13449 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13450 dtrace_state_clean, state); 13451 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13452 dtrace_state_deadman, state); 13453 #endif 13454 13455 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13456 13457 /* 13458 * Now it's time to actually fire the BEGIN probe. We need to disable 13459 * interrupts here both to record the CPU on which we fired the BEGIN 13460 * probe (the data from this CPU will be processed first at user 13461 * level) and to manually activate the buffer for this CPU. 
13462 */ 13463 cookie = dtrace_interrupt_disable(); 13464 *cpu = curcpu; 13465 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13466 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13467 13468 dtrace_probe(dtrace_probeid_begin, 13469 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13470 dtrace_interrupt_enable(cookie); 13471 /* 13472 * We may have had an exit action from a BEGIN probe; only change our 13473 * state to ACTIVE if we're still in WARMUP. 13474 */ 13475 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13476 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13477 13478 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13479 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13480 13481 /* 13482 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13483 * want each CPU to transition its principal buffer out of the 13484 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13485 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13486 * atomically transition from processing none of a state's ECBs to 13487 * processing all of them. 13488 */ 13489 dtrace_xcall(DTRACE_CPUALL, 13490 (dtrace_xcall_t)dtrace_buffer_activate, state); 13491 goto out; 13492 13493 err: 13494 dtrace_buffer_free(state->dts_buffer); 13495 dtrace_buffer_free(state->dts_aggbuffer); 13496 13497 if ((nspec = state->dts_nspeculations) == 0) { 13498 ASSERT(state->dts_speculations == NULL); 13499 goto out; 13500 } 13501 13502 spec = state->dts_speculations; 13503 ASSERT(spec != NULL); 13504 13505 for (i = 0; i < state->dts_nspeculations; i++) { 13506 if ((buf = spec[i].dtsp_buffer) == NULL) 13507 break; 13508 13509 dtrace_buffer_free(buf); 13510 kmem_free(buf, bufsize); 13511 } 13512 13513 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13514 state->dts_nspeculations = 0; 13515 state->dts_speculations = NULL; 13516 13517 out: 13518 mutex_exit(&dtrace_lock); 13519 mutex_exit(&cpu_lock); 13520 13521 return (rval); 13522 } 13523 13524 static int 13525 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13526 { 13527 dtrace_icookie_t cookie; 13528 13529 ASSERT(MUTEX_HELD(&dtrace_lock)); 13530 13531 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13532 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13533 return (EINVAL); 13534 13535 /* 13536 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13537 * to be sure that every CPU has seen it. See below for the details 13538 * on why this is done. 13539 */ 13540 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13541 dtrace_sync(); 13542 13543 /* 13544 * By this point, it is impossible for any CPU to be still processing 13545 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13546 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13547 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13548 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13549 * iff we're in the END probe. 13550 */ 13551 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13552 dtrace_sync(); 13553 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13554 13555 /* 13556 * Finally, we can release the reserve and call the END probe. We 13557 * disable interrupts across calling the END probe to allow us to 13558 * return the CPU on which we actually called the END probe. This 13559 * allows user-land to be sure that this CPU's principal buffer is 13560 * processed last. 
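
/*
 * Illustrative sketch (not part of this file's build): the ordering that
 * dtrace_state_stop() relies on, reduced to a standalone routine.  Every
 * activity transition is followed by a "wait until each CPU has seen it"
 * step (dtrace_sync() in the real code, a caller-supplied callback here),
 * so no CPU can still be recording under the previous activity value when
 * the next step begins.  All names are hypothetical.
 */
enum ex_activity { EX_ACTIVE, EX_DRAINING, EX_COOLDOWN, EX_STOPPED };

struct ex_state {
    volatile enum ex_activity exs_activity;
};

static void
ex_stop(struct ex_state *sp, void (*sync)(void), void (*fire_end)(void))
{
    sp->exs_activity = EX_DRAINING; /* stop reserving new buffer space */
    sync();

    sp->exs_activity = EX_COOLDOWN; /* only the END probe may record now */
    sync();

    fire_end();                     /* record the final END probe */

    sp->exs_activity = EX_STOPPED;
    sync();
}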
13561 */ 13562 state->dts_reserve = 0; 13563 13564 cookie = dtrace_interrupt_disable(); 13565 *cpu = curcpu; 13566 dtrace_probe(dtrace_probeid_end, 13567 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13568 dtrace_interrupt_enable(cookie); 13569 13570 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13571 dtrace_sync(); 13572 13573 return (0); 13574 } 13575 13576 static int 13577 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13578 dtrace_optval_t val) 13579 { 13580 ASSERT(MUTEX_HELD(&dtrace_lock)); 13581 13582 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13583 return (EBUSY); 13584 13585 if (option >= DTRACEOPT_MAX) 13586 return (EINVAL); 13587 13588 if (option != DTRACEOPT_CPU && val < 0) 13589 return (EINVAL); 13590 13591 switch (option) { 13592 case DTRACEOPT_DESTRUCTIVE: 13593 if (dtrace_destructive_disallow) 13594 return (EACCES); 13595 13596 state->dts_cred.dcr_destructive = 1; 13597 break; 13598 13599 case DTRACEOPT_BUFSIZE: 13600 case DTRACEOPT_DYNVARSIZE: 13601 case DTRACEOPT_AGGSIZE: 13602 case DTRACEOPT_SPECSIZE: 13603 case DTRACEOPT_STRSIZE: 13604 if (val < 0) 13605 return (EINVAL); 13606 13607 if (val >= LONG_MAX) { 13608 /* 13609 * If this is an otherwise negative value, set it to 13610 * the highest multiple of 128m less than LONG_MAX. 13611 * Technically, we're adjusting the size without 13612 * regard to the buffer resizing policy, but in fact, 13613 * this has no effect -- if we set the buffer size to 13614 * ~LONG_MAX and the buffer policy is ultimately set to 13615 * be "manual", the buffer allocation is guaranteed to 13616 * fail, if only because the allocation requires two 13617 * buffers. (We set the the size to the highest 13618 * multiple of 128m because it ensures that the size 13619 * will remain a multiple of a megabyte when 13620 * repeatedly halved -- all the way down to 15m.) 13621 */ 13622 val = LONG_MAX - (1 << 27) + 1; 13623 } 13624 } 13625 13626 state->dts_options[option] = val; 13627 13628 return (0); 13629 } 13630 13631 static void 13632 dtrace_state_destroy(dtrace_state_t *state) 13633 { 13634 dtrace_ecb_t *ecb; 13635 dtrace_vstate_t *vstate = &state->dts_vstate; 13636 #if defined(sun) 13637 minor_t minor = getminor(state->dts_dev); 13638 #endif 13639 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13640 dtrace_speculation_t *spec = state->dts_speculations; 13641 int nspec = state->dts_nspeculations; 13642 uint32_t match; 13643 13644 ASSERT(MUTEX_HELD(&dtrace_lock)); 13645 ASSERT(MUTEX_HELD(&cpu_lock)); 13646 13647 /* 13648 * First, retract any retained enablings for this state. 13649 */ 13650 dtrace_enabling_retract(state); 13651 ASSERT(state->dts_nretained == 0); 13652 13653 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13654 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13655 /* 13656 * We have managed to come into dtrace_state_destroy() on a 13657 * hot enabling -- almost certainly because of a disorderly 13658 * shutdown of a consumer. (That is, a consumer that is 13659 * exiting without having called dtrace_stop().) In this case, 13660 * we're going to set our activity to be KILLED, and then 13661 * issue a sync to be sure that everyone is out of probe 13662 * context before we start blowing away ECBs. 13663 */ 13664 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13665 dtrace_sync(); 13666 } 13667 13668 /* 13669 * Release the credential hold we took in dtrace_state_create(). 
13670 */ 13671 if (state->dts_cred.dcr_cred != NULL) 13672 crfree(state->dts_cred.dcr_cred); 13673 13674 /* 13675 * Now we can safely disable and destroy any enabled probes. Because 13676 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13677 * (especially if they're all enabled), we take two passes through the 13678 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13679 * in the second we disable whatever is left over. 13680 */ 13681 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13682 for (i = 0; i < state->dts_necbs; i++) { 13683 if ((ecb = state->dts_ecbs[i]) == NULL) 13684 continue; 13685 13686 if (match && ecb->dte_probe != NULL) { 13687 dtrace_probe_t *probe = ecb->dte_probe; 13688 dtrace_provider_t *prov = probe->dtpr_provider; 13689 13690 if (!(prov->dtpv_priv.dtpp_flags & match)) 13691 continue; 13692 } 13693 13694 dtrace_ecb_disable(ecb); 13695 dtrace_ecb_destroy(ecb); 13696 } 13697 13698 if (!match) 13699 break; 13700 } 13701 13702 /* 13703 * Before we free the buffers, perform one more sync to assure that 13704 * every CPU is out of probe context. 13705 */ 13706 dtrace_sync(); 13707 13708 dtrace_buffer_free(state->dts_buffer); 13709 dtrace_buffer_free(state->dts_aggbuffer); 13710 13711 for (i = 0; i < nspec; i++) 13712 dtrace_buffer_free(spec[i].dtsp_buffer); 13713 13714 #if defined(sun) 13715 if (state->dts_cleaner != CYCLIC_NONE) 13716 cyclic_remove(state->dts_cleaner); 13717 13718 if (state->dts_deadman != CYCLIC_NONE) 13719 cyclic_remove(state->dts_deadman); 13720 #else 13721 callout_stop(&state->dts_cleaner); 13722 callout_drain(&state->dts_cleaner); 13723 callout_stop(&state->dts_deadman); 13724 callout_drain(&state->dts_deadman); 13725 #endif 13726 13727 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13728 dtrace_vstate_fini(vstate); 13729 if (state->dts_ecbs != NULL) 13730 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13731 13732 if (state->dts_aggregations != NULL) { 13733 #ifdef DEBUG 13734 for (i = 0; i < state->dts_naggregations; i++) 13735 ASSERT(state->dts_aggregations[i] == NULL); 13736 #endif 13737 ASSERT(state->dts_naggregations > 0); 13738 kmem_free(state->dts_aggregations, 13739 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13740 } 13741 13742 kmem_free(state->dts_buffer, bufsize); 13743 kmem_free(state->dts_aggbuffer, bufsize); 13744 13745 for (i = 0; i < nspec; i++) 13746 kmem_free(spec[i].dtsp_buffer, bufsize); 13747 13748 if (spec != NULL) 13749 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13750 13751 dtrace_format_destroy(state); 13752 13753 if (state->dts_aggid_arena != NULL) { 13754 #if defined(sun) 13755 vmem_destroy(state->dts_aggid_arena); 13756 #else 13757 delete_unrhdr(state->dts_aggid_arena); 13758 #endif 13759 state->dts_aggid_arena = NULL; 13760 } 13761 #if defined(sun) 13762 ddi_soft_state_free(dtrace_softstate, minor); 13763 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13764 #endif 13765 } 13766 13767 /* 13768 * DTrace Anonymous Enabling Functions 13769 */ 13770 static dtrace_state_t * 13771 dtrace_anon_grab(void) 13772 { 13773 dtrace_state_t *state; 13774 13775 ASSERT(MUTEX_HELD(&dtrace_lock)); 13776 13777 if ((state = dtrace_anon.dta_state) == NULL) { 13778 ASSERT(dtrace_anon.dta_enabling == NULL); 13779 return (NULL); 13780 } 13781 13782 ASSERT(dtrace_anon.dta_enabling != NULL); 13783 ASSERT(dtrace_retained != NULL); 13784 13785 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13786 dtrace_anon.dta_enabling = NULL; 13787 
dtrace_anon.dta_state = NULL; 13788 13789 return (state); 13790 } 13791 13792 static void 13793 dtrace_anon_property(void) 13794 { 13795 int i, rv; 13796 dtrace_state_t *state; 13797 dof_hdr_t *dof; 13798 char c[32]; /* enough for "dof-data-" + digits */ 13799 13800 ASSERT(MUTEX_HELD(&dtrace_lock)); 13801 ASSERT(MUTEX_HELD(&cpu_lock)); 13802 13803 for (i = 0; ; i++) { 13804 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13805 13806 dtrace_err_verbose = 1; 13807 13808 if ((dof = dtrace_dof_property(c)) == NULL) { 13809 dtrace_err_verbose = 0; 13810 break; 13811 } 13812 13813 #if defined(sun) 13814 /* 13815 * We want to create anonymous state, so we need to transition 13816 * the kernel debugger to indicate that DTrace is active. If 13817 * this fails (e.g. because the debugger has modified text in 13818 * some way), we won't continue with the processing. 13819 */ 13820 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13821 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13822 "enabling ignored."); 13823 dtrace_dof_destroy(dof); 13824 break; 13825 } 13826 #endif 13827 13828 /* 13829 * If we haven't allocated an anonymous state, we'll do so now. 13830 */ 13831 if ((state = dtrace_anon.dta_state) == NULL) { 13832 #if defined(sun) 13833 state = dtrace_state_create(NULL, NULL); 13834 #else 13835 state = dtrace_state_create(NULL); 13836 #endif 13837 dtrace_anon.dta_state = state; 13838 13839 if (state == NULL) { 13840 /* 13841 * This basically shouldn't happen: the only 13842 * failure mode from dtrace_state_create() is a 13843 * failure of ddi_soft_state_zalloc() that 13844 * itself should never happen. Still, the 13845 * interface allows for a failure mode, and 13846 * we want to fail as gracefully as possible: 13847 * we'll emit an error message and cease 13848 * processing anonymous state in this case. 13849 */ 13850 cmn_err(CE_WARN, "failed to create " 13851 "anonymous state"); 13852 dtrace_dof_destroy(dof); 13853 break; 13854 } 13855 } 13856 13857 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13858 &dtrace_anon.dta_enabling, 0, B_TRUE); 13859 13860 if (rv == 0) 13861 rv = dtrace_dof_options(dof, state); 13862 13863 dtrace_err_verbose = 0; 13864 dtrace_dof_destroy(dof); 13865 13866 if (rv != 0) { 13867 /* 13868 * This is malformed DOF; chuck any anonymous state 13869 * that we created. 13870 */ 13871 ASSERT(dtrace_anon.dta_enabling == NULL); 13872 dtrace_state_destroy(state); 13873 dtrace_anon.dta_state = NULL; 13874 break; 13875 } 13876 13877 ASSERT(dtrace_anon.dta_enabling != NULL); 13878 } 13879 13880 if (dtrace_anon.dta_enabling != NULL) { 13881 int rval; 13882 13883 /* 13884 * dtrace_enabling_retain() can only fail because we are 13885 * trying to retain more enablings than are allowed -- but 13886 * we only have one anonymous enabling, and we are guaranteed 13887 * to be allowed at least one retained enabling; we assert 13888 * that dtrace_enabling_retain() returns success. 
13889 */ 13890 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13891 ASSERT(rval == 0); 13892 13893 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13894 } 13895 } 13896 13897 /* 13898 * DTrace Helper Functions 13899 */ 13900 static void 13901 dtrace_helper_trace(dtrace_helper_action_t *helper, 13902 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13903 { 13904 uint32_t size, next, nnext, i; 13905 dtrace_helptrace_t *ent; 13906 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13907 13908 if (!dtrace_helptrace_enabled) 13909 return; 13910 13911 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13912 13913 /* 13914 * What would a tracing framework be without its own tracing 13915 * framework? (Well, a hell of a lot simpler, for starters...) 13916 */ 13917 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13918 sizeof (uint64_t) - sizeof (uint64_t); 13919 13920 /* 13921 * Iterate until we can allocate a slot in the trace buffer. 13922 */ 13923 do { 13924 next = dtrace_helptrace_next; 13925 13926 if (next + size < dtrace_helptrace_bufsize) { 13927 nnext = next + size; 13928 } else { 13929 nnext = size; 13930 } 13931 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13932 13933 /* 13934 * We have our slot; fill it in. 13935 */ 13936 if (nnext == size) 13937 next = 0; 13938 13939 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13940 ent->dtht_helper = helper; 13941 ent->dtht_where = where; 13942 ent->dtht_nlocals = vstate->dtvs_nlocals; 13943 13944 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13945 mstate->dtms_fltoffs : -1; 13946 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13947 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 13948 13949 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13950 dtrace_statvar_t *svar; 13951 13952 if ((svar = vstate->dtvs_locals[i]) == NULL) 13953 continue; 13954 13955 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13956 ent->dtht_locals[i] = 13957 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 13958 } 13959 } 13960 13961 static uint64_t 13962 dtrace_helper(int which, dtrace_mstate_t *mstate, 13963 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13964 { 13965 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 13966 uint64_t sarg0 = mstate->dtms_arg[0]; 13967 uint64_t sarg1 = mstate->dtms_arg[1]; 13968 uint64_t rval = 0; 13969 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13970 dtrace_helper_action_t *helper; 13971 dtrace_vstate_t *vstate; 13972 dtrace_difo_t *pred; 13973 int i, trace = dtrace_helptrace_enabled; 13974 13975 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13976 13977 if (helpers == NULL) 13978 return (0); 13979 13980 if ((helper = helpers->dthps_actions[which]) == NULL) 13981 return (0); 13982 13983 vstate = &helpers->dthps_vstate; 13984 mstate->dtms_arg[0] = arg0; 13985 mstate->dtms_arg[1] = arg1; 13986 13987 /* 13988 * Now iterate over each helper. If its predicate evaluates to 'true', 13989 * we'll call the corresponding actions. Note that the below calls 13990 * to dtrace_dif_emulate() may set faults in machine state. This is 13991 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13992 * the stored DIF offset with its own (which is the desired behavior). 13993 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13994 * from machine state; this is okay, too. 
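
/*
 * Illustrative sketch (not part of this file's build): the lock-free slot
 * reservation used by dtrace_helper_trace() above.  Racing writers
 * advance a shared cursor with compare-and-swap; the winner owns the
 * range it claimed, and a record that would run off the end of the buffer
 * wraps back to offset zero.  C11 atomics stand in for dtrace_cas32().
 */
#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint32_t ex_next;    /* shared write cursor */

static uint32_t
ex_reserve_slot(uint32_t size, uint32_t bufsize)
{
    uint32_t next, nnext;

    do {
        next = atomic_load_explicit(&ex_next, memory_order_relaxed);
        nnext = (next + size < bufsize) ? next + size : size;
    } while (!atomic_compare_exchange_weak(&ex_next, &next, nnext));

    /* A wrap means our record starts at offset zero, not at 'next'. */
    return (nnext == size ? 0 : next);
}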
13995 */ 13996 for (; helper != NULL; helper = helper->dtha_next) { 13997 if ((pred = helper->dtha_predicate) != NULL) { 13998 if (trace) 13999 dtrace_helper_trace(helper, mstate, vstate, 0); 14000 14001 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14002 goto next; 14003 14004 if (*flags & CPU_DTRACE_FAULT) 14005 goto err; 14006 } 14007 14008 for (i = 0; i < helper->dtha_nactions; i++) { 14009 if (trace) 14010 dtrace_helper_trace(helper, 14011 mstate, vstate, i + 1); 14012 14013 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14014 mstate, vstate, state); 14015 14016 if (*flags & CPU_DTRACE_FAULT) 14017 goto err; 14018 } 14019 14020 next: 14021 if (trace) 14022 dtrace_helper_trace(helper, mstate, vstate, 14023 DTRACE_HELPTRACE_NEXT); 14024 } 14025 14026 if (trace) 14027 dtrace_helper_trace(helper, mstate, vstate, 14028 DTRACE_HELPTRACE_DONE); 14029 14030 /* 14031 * Restore the arg0 that we saved upon entry. 14032 */ 14033 mstate->dtms_arg[0] = sarg0; 14034 mstate->dtms_arg[1] = sarg1; 14035 14036 return (rval); 14037 14038 err: 14039 if (trace) 14040 dtrace_helper_trace(helper, mstate, vstate, 14041 DTRACE_HELPTRACE_ERR); 14042 14043 /* 14044 * Restore the arg0 that we saved upon entry. 14045 */ 14046 mstate->dtms_arg[0] = sarg0; 14047 mstate->dtms_arg[1] = sarg1; 14048 14049 return (0); 14050 } 14051 14052 static void 14053 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14054 dtrace_vstate_t *vstate) 14055 { 14056 int i; 14057 14058 if (helper->dtha_predicate != NULL) 14059 dtrace_difo_release(helper->dtha_predicate, vstate); 14060 14061 for (i = 0; i < helper->dtha_nactions; i++) { 14062 ASSERT(helper->dtha_actions[i] != NULL); 14063 dtrace_difo_release(helper->dtha_actions[i], vstate); 14064 } 14065 14066 kmem_free(helper->dtha_actions, 14067 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14068 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14069 } 14070 14071 static int 14072 dtrace_helper_destroygen(int gen) 14073 { 14074 proc_t *p = curproc; 14075 dtrace_helpers_t *help = p->p_dtrace_helpers; 14076 dtrace_vstate_t *vstate; 14077 int i; 14078 14079 ASSERT(MUTEX_HELD(&dtrace_lock)); 14080 14081 if (help == NULL || gen > help->dthps_generation) 14082 return (EINVAL); 14083 14084 vstate = &help->dthps_vstate; 14085 14086 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14087 dtrace_helper_action_t *last = NULL, *h, *next; 14088 14089 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14090 next = h->dtha_next; 14091 14092 if (h->dtha_generation == gen) { 14093 if (last != NULL) { 14094 last->dtha_next = next; 14095 } else { 14096 help->dthps_actions[i] = next; 14097 } 14098 14099 dtrace_helper_action_destroy(h, vstate); 14100 } else { 14101 last = h; 14102 } 14103 } 14104 } 14105 14106 /* 14107 * Interate until we've cleared out all helper providers with the 14108 * given generation number. 14109 */ 14110 for (;;) { 14111 dtrace_helper_provider_t *prov; 14112 14113 /* 14114 * Look for a helper provider with the right generation. We 14115 * have to start back at the beginning of the list each time 14116 * because we drop dtrace_lock. It's unlikely that we'll make 14117 * more than two passes. 14118 */ 14119 for (i = 0; i < help->dthps_nprovs; i++) { 14120 prov = help->dthps_provs[i]; 14121 14122 if (prov->dthp_generation == gen) 14123 break; 14124 } 14125 14126 /* 14127 * If there were no matches, we're done. 14128 */ 14129 if (i == help->dthps_nprovs) 14130 break; 14131 14132 /* 14133 * Move the last helper provider into this slot. 
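
/*
 * Illustrative sketch (not part of this file's build): the
 * order-insensitive "swap remove" used below.  The last element is moved
 * into the vacated slot, so the array stays dense without shifting every
 * element that follows.
 */
#include <stddef.h>

static void
ex_swap_remove(void **array, size_t *count, size_t i)
{
    array[i] = array[--*count]; /* move the last element into the hole */
    array[*count] = NULL;
}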
14134 */ 14135 help->dthps_nprovs--; 14136 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14137 help->dthps_provs[help->dthps_nprovs] = NULL; 14138 14139 mutex_exit(&dtrace_lock); 14140 14141 /* 14142 * If we have a meta provider, remove this helper provider. 14143 */ 14144 mutex_enter(&dtrace_meta_lock); 14145 if (dtrace_meta_pid != NULL) { 14146 ASSERT(dtrace_deferred_pid == NULL); 14147 dtrace_helper_provider_remove(&prov->dthp_prov, 14148 p->p_pid); 14149 } 14150 mutex_exit(&dtrace_meta_lock); 14151 14152 dtrace_helper_provider_destroy(prov); 14153 14154 mutex_enter(&dtrace_lock); 14155 } 14156 14157 return (0); 14158 } 14159 14160 static int 14161 dtrace_helper_validate(dtrace_helper_action_t *helper) 14162 { 14163 int err = 0, i; 14164 dtrace_difo_t *dp; 14165 14166 if ((dp = helper->dtha_predicate) != NULL) 14167 err += dtrace_difo_validate_helper(dp); 14168 14169 for (i = 0; i < helper->dtha_nactions; i++) 14170 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14171 14172 return (err == 0); 14173 } 14174 14175 static int 14176 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14177 { 14178 dtrace_helpers_t *help; 14179 dtrace_helper_action_t *helper, *last; 14180 dtrace_actdesc_t *act; 14181 dtrace_vstate_t *vstate; 14182 dtrace_predicate_t *pred; 14183 int count = 0, nactions = 0, i; 14184 14185 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14186 return (EINVAL); 14187 14188 help = curproc->p_dtrace_helpers; 14189 last = help->dthps_actions[which]; 14190 vstate = &help->dthps_vstate; 14191 14192 for (count = 0; last != NULL; last = last->dtha_next) { 14193 count++; 14194 if (last->dtha_next == NULL) 14195 break; 14196 } 14197 14198 /* 14199 * If we already have dtrace_helper_actions_max helper actions for this 14200 * helper action type, we'll refuse to add a new one. 
14201 */ 14202 if (count >= dtrace_helper_actions_max) 14203 return (ENOSPC); 14204 14205 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14206 helper->dtha_generation = help->dthps_generation; 14207 14208 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14209 ASSERT(pred->dtp_difo != NULL); 14210 dtrace_difo_hold(pred->dtp_difo); 14211 helper->dtha_predicate = pred->dtp_difo; 14212 } 14213 14214 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14215 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14216 goto err; 14217 14218 if (act->dtad_difo == NULL) 14219 goto err; 14220 14221 nactions++; 14222 } 14223 14224 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14225 (helper->dtha_nactions = nactions), KM_SLEEP); 14226 14227 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14228 dtrace_difo_hold(act->dtad_difo); 14229 helper->dtha_actions[i++] = act->dtad_difo; 14230 } 14231 14232 if (!dtrace_helper_validate(helper)) 14233 goto err; 14234 14235 if (last == NULL) { 14236 help->dthps_actions[which] = helper; 14237 } else { 14238 last->dtha_next = helper; 14239 } 14240 14241 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14242 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14243 dtrace_helptrace_next = 0; 14244 } 14245 14246 return (0); 14247 err: 14248 dtrace_helper_action_destroy(helper, vstate); 14249 return (EINVAL); 14250 } 14251 14252 static void 14253 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14254 dof_helper_t *dofhp) 14255 { 14256 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14257 14258 mutex_enter(&dtrace_meta_lock); 14259 mutex_enter(&dtrace_lock); 14260 14261 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14262 /* 14263 * If the dtrace module is loaded but not attached, or if 14264 * there isn't a meta provider registered to deal with 14265 * these provider descriptions, we need to postpone creating 14266 * the actual providers until later. 14267 */ 14268 14269 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14270 dtrace_deferred_pid != help) { 14271 help->dthps_deferred = 1; 14272 help->dthps_pid = p->p_pid; 14273 help->dthps_next = dtrace_deferred_pid; 14274 help->dthps_prev = NULL; 14275 if (dtrace_deferred_pid != NULL) 14276 dtrace_deferred_pid->dthps_prev = help; 14277 dtrace_deferred_pid = help; 14278 } 14279 14280 mutex_exit(&dtrace_lock); 14281 14282 } else if (dofhp != NULL) { 14283 /* 14284 * If the dtrace module is loaded and we have a particular 14285 * helper provider description, pass that off to the 14286 * meta provider. 14287 */ 14288 14289 mutex_exit(&dtrace_lock); 14290 14291 dtrace_helper_provide(dofhp, p->p_pid); 14292 14293 } else { 14294 /* 14295 * Otherwise, just pass all the helper provider descriptions 14296 * off to the meta provider. 14297 */ 14298 14299 int i; 14300 mutex_exit(&dtrace_lock); 14301 14302 for (i = 0; i < help->dthps_nprovs; i++) { 14303 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14304 p->p_pid); 14305 } 14306 } 14307 14308 mutex_exit(&dtrace_meta_lock); 14309 } 14310 14311 static int 14312 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14313 { 14314 dtrace_helpers_t *help; 14315 dtrace_helper_provider_t *hprov, **tmp_provs; 14316 uint_t tmp_maxprovs, i; 14317 14318 ASSERT(MUTEX_HELD(&dtrace_lock)); 14319 14320 help = curproc->p_dtrace_helpers; 14321 ASSERT(help != NULL); 14322 14323 /* 14324 * If we already have dtrace_helper_providers_max helper providers, 14325 * we'll refuse to add a new one.
14326 */ 14327 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14328 return (ENOSPC); 14329 14330 /* 14331 * Check to make sure this isn't a duplicate. 14332 */ 14333 for (i = 0; i < help->dthps_nprovs; i++) { 14334 if (dofhp->dofhp_addr == 14335 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14336 return (EALREADY); 14337 } 14338 14339 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14340 hprov->dthp_prov = *dofhp; 14341 hprov->dthp_ref = 1; 14342 hprov->dthp_generation = gen; 14343 14344 /* 14345 * Allocate a bigger table for helper providers if it's already full. 14346 */ 14347 if (help->dthps_maxprovs == help->dthps_nprovs) { 14348 tmp_maxprovs = help->dthps_maxprovs; 14349 tmp_provs = help->dthps_provs; 14350 14351 if (help->dthps_maxprovs == 0) 14352 help->dthps_maxprovs = 2; 14353 else 14354 help->dthps_maxprovs *= 2; 14355 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14356 help->dthps_maxprovs = dtrace_helper_providers_max; 14357 14358 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14359 14360 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14361 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14362 14363 if (tmp_provs != NULL) { 14364 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14365 sizeof (dtrace_helper_provider_t *)); 14366 kmem_free(tmp_provs, tmp_maxprovs * 14367 sizeof (dtrace_helper_provider_t *)); 14368 } 14369 } 14370 14371 help->dthps_provs[help->dthps_nprovs] = hprov; 14372 help->dthps_nprovs++; 14373 14374 return (0); 14375 } 14376 14377 static void 14378 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14379 { 14380 mutex_enter(&dtrace_lock); 14381 14382 if (--hprov->dthp_ref == 0) { 14383 dof_hdr_t *dof; 14384 mutex_exit(&dtrace_lock); 14385 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14386 dtrace_dof_destroy(dof); 14387 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14388 } else { 14389 mutex_exit(&dtrace_lock); 14390 } 14391 } 14392 14393 static int 14394 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14395 { 14396 uintptr_t daddr = (uintptr_t)dof; 14397 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14398 dof_provider_t *provider; 14399 dof_probe_t *probe; 14400 uint8_t *arg; 14401 char *strtab, *typestr; 14402 dof_stridx_t typeidx; 14403 size_t typesz; 14404 uint_t nprobes, j, k; 14405 14406 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14407 14408 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14409 dtrace_dof_error(dof, "misaligned section offset"); 14410 return (-1); 14411 } 14412 14413 /* 14414 * The section needs to be large enough to contain the DOF provider 14415 * structure appropriate for the given version. 14416 */ 14417 if (sec->dofs_size < 14418 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14419 offsetof(dof_provider_t, dofpv_prenoffs) : 14420 sizeof (dof_provider_t))) { 14421 dtrace_dof_error(dof, "provider section too small"); 14422 return (-1); 14423 } 14424 14425 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14426 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14427 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14428 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14429 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14430 14431 if (str_sec == NULL || prb_sec == NULL || 14432 arg_sec == NULL || off_sec == NULL) 14433 return (-1); 14434 14435 enoff_sec = NULL; 14436 14437 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14438 provider->dofpv_prenoffs != DOF_SECT_NONE && 14439 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14440 provider->dofpv_prenoffs)) == NULL) 14441 return (-1); 14442 14443 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14444 14445 if (provider->dofpv_name >= str_sec->dofs_size || 14446 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14447 dtrace_dof_error(dof, "invalid provider name"); 14448 return (-1); 14449 } 14450 14451 if (prb_sec->dofs_entsize == 0 || 14452 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14453 dtrace_dof_error(dof, "invalid entry size"); 14454 return (-1); 14455 } 14456 14457 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14458 dtrace_dof_error(dof, "misaligned entry size"); 14459 return (-1); 14460 } 14461 14462 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14463 dtrace_dof_error(dof, "invalid entry size"); 14464 return (-1); 14465 } 14466 14467 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14468 dtrace_dof_error(dof, "misaligned section offset"); 14469 return (-1); 14470 } 14471 14472 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14473 dtrace_dof_error(dof, "invalid entry size"); 14474 return (-1); 14475 } 14476 14477 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14478 14479 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14480 14481 /* 14482 * Take a pass through the probes to check for errors. 14483 */ 14484 for (j = 0; j < nprobes; j++) { 14485 probe = (dof_probe_t *)(uintptr_t)(daddr + 14486 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14487 14488 if (probe->dofpr_func >= str_sec->dofs_size) { 14489 dtrace_dof_error(dof, "invalid function name"); 14490 return (-1); 14491 } 14492 14493 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14494 dtrace_dof_error(dof, "function name too long"); 14495 return (-1); 14496 } 14497 14498 if (probe->dofpr_name >= str_sec->dofs_size || 14499 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14500 dtrace_dof_error(dof, "invalid probe name"); 14501 return (-1); 14502 } 14503 14504 /* 14505 * The offset count must not wrap the index, and the offsets 14506 * must also not overflow the section's data. 14507 */ 14508 if (probe->dofpr_offidx + probe->dofpr_noffs < 14509 probe->dofpr_offidx || 14510 (probe->dofpr_offidx + probe->dofpr_noffs) * 14511 off_sec->dofs_entsize > off_sec->dofs_size) { 14512 dtrace_dof_error(dof, "invalid probe offset"); 14513 return (-1); 14514 } 14515 14516 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14517 /* 14518 * If there's no is-enabled offset section, make sure 14519 * there aren't any is-enabled offsets. Otherwise 14520 * perform the same checks as for probe offsets 14521 * (immediately above). 
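 *
 * Expressed with hypothetical names (idx standing for dofpr_enoffidx
 * and n for dofpr_nenoffs), the check below has the same shape as the
 * probe-offset check:
 *
 *	idx + n < idx                                     -- count wraps the index
 *	(idx + n) * enoff_sec->dofs_entsize
 *	    > enoff_sec->dofs_size                        -- offsets run off the section
 *
 * and either condition makes the is-enabled offsets invalid.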
14522 */ 14523 if (enoff_sec == NULL) { 14524 if (probe->dofpr_enoffidx != 0 || 14525 probe->dofpr_nenoffs != 0) { 14526 dtrace_dof_error(dof, "is-enabled " 14527 "offsets with null section"); 14528 return (-1); 14529 } 14530 } else if (probe->dofpr_enoffidx + 14531 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14532 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14533 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14534 dtrace_dof_error(dof, "invalid is-enabled " 14535 "offset"); 14536 return (-1); 14537 } 14538 14539 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14540 dtrace_dof_error(dof, "zero probe and " 14541 "is-enabled offsets"); 14542 return (-1); 14543 } 14544 } else if (probe->dofpr_noffs == 0) { 14545 dtrace_dof_error(dof, "zero probe offsets"); 14546 return (-1); 14547 } 14548 14549 if (probe->dofpr_argidx + probe->dofpr_xargc < 14550 probe->dofpr_argidx || 14551 (probe->dofpr_argidx + probe->dofpr_xargc) * 14552 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14553 dtrace_dof_error(dof, "invalid args"); 14554 return (-1); 14555 } 14556 14557 typeidx = probe->dofpr_nargv; 14558 typestr = strtab + probe->dofpr_nargv; 14559 for (k = 0; k < probe->dofpr_nargc; k++) { 14560 if (typeidx >= str_sec->dofs_size) { 14561 dtrace_dof_error(dof, "bad " 14562 "native argument type"); 14563 return (-1); 14564 } 14565 14566 typesz = strlen(typestr) + 1; 14567 if (typesz > DTRACE_ARGTYPELEN) { 14568 dtrace_dof_error(dof, "native " 14569 "argument type too long"); 14570 return (-1); 14571 } 14572 typeidx += typesz; 14573 typestr += typesz; 14574 } 14575 14576 typeidx = probe->dofpr_xargv; 14577 typestr = strtab + probe->dofpr_xargv; 14578 for (k = 0; k < probe->dofpr_xargc; k++) { 14579 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14580 dtrace_dof_error(dof, "bad " 14581 "native argument index"); 14582 return (-1); 14583 } 14584 14585 if (typeidx >= str_sec->dofs_size) { 14586 dtrace_dof_error(dof, "bad " 14587 "translated argument type"); 14588 return (-1); 14589 } 14590 14591 typesz = strlen(typestr) + 1; 14592 if (typesz > DTRACE_ARGTYPELEN) { 14593 dtrace_dof_error(dof, "translated argument " 14594 "type too long"); 14595 return (-1); 14596 } 14597 14598 typeidx += typesz; 14599 typestr += typesz; 14600 } 14601 } 14602 14603 return (0); 14604 } 14605 14606 static int 14607 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14608 { 14609 dtrace_helpers_t *help; 14610 dtrace_vstate_t *vstate; 14611 dtrace_enabling_t *enab = NULL; 14612 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14613 uintptr_t daddr = (uintptr_t)dof; 14614 14615 ASSERT(MUTEX_HELD(&dtrace_lock)); 14616 14617 if ((help = curproc->p_dtrace_helpers) == NULL) 14618 help = dtrace_helpers_create(curproc); 14619 14620 vstate = &help->dthps_vstate; 14621 14622 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14623 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14624 dtrace_dof_destroy(dof); 14625 return (rv); 14626 } 14627 14628 /* 14629 * Look for helper providers and validate their descriptions. 
14630 */ 14631 if (dhp != NULL) { 14632 for (i = 0; i < dof->dofh_secnum; i++) { 14633 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14634 dof->dofh_secoff + i * dof->dofh_secsize); 14635 14636 if (sec->dofs_type != DOF_SECT_PROVIDER) 14637 continue; 14638 14639 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14640 dtrace_enabling_destroy(enab); 14641 dtrace_dof_destroy(dof); 14642 return (-1); 14643 } 14644 14645 nprovs++; 14646 } 14647 } 14648 14649 /* 14650 * Now we need to walk through the ECB descriptions in the enabling. 14651 */ 14652 for (i = 0; i < enab->dten_ndesc; i++) { 14653 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14654 dtrace_probedesc_t *desc = &ep->dted_probe; 14655 14656 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14657 continue; 14658 14659 if (strcmp(desc->dtpd_mod, "helper") != 0) 14660 continue; 14661 14662 if (strcmp(desc->dtpd_func, "ustack") != 0) 14663 continue; 14664 14665 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14666 ep)) != 0) { 14667 /* 14668 * Adding this helper action failed -- we are now going 14669 * to rip out the entire generation and return failure. 14670 */ 14671 (void) dtrace_helper_destroygen(help->dthps_generation); 14672 dtrace_enabling_destroy(enab); 14673 dtrace_dof_destroy(dof); 14674 return (-1); 14675 } 14676 14677 nhelpers++; 14678 } 14679 14680 if (nhelpers < enab->dten_ndesc) 14681 dtrace_dof_error(dof, "unmatched helpers"); 14682 14683 gen = help->dthps_generation++; 14684 dtrace_enabling_destroy(enab); 14685 14686 if (dhp != NULL && nprovs > 0) { 14687 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14688 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14689 mutex_exit(&dtrace_lock); 14690 dtrace_helper_provider_register(curproc, help, dhp); 14691 mutex_enter(&dtrace_lock); 14692 14693 destroy = 0; 14694 } 14695 } 14696 14697 if (destroy) 14698 dtrace_dof_destroy(dof); 14699 14700 return (gen); 14701 } 14702 14703 static dtrace_helpers_t * 14704 dtrace_helpers_create(proc_t *p) 14705 { 14706 dtrace_helpers_t *help; 14707 14708 ASSERT(MUTEX_HELD(&dtrace_lock)); 14709 ASSERT(p->p_dtrace_helpers == NULL); 14710 14711 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14712 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14713 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14714 14715 p->p_dtrace_helpers = help; 14716 dtrace_helpers++; 14717 14718 return (help); 14719 } 14720 14721 #if defined(sun) 14722 static 14723 #endif 14724 void 14725 dtrace_helpers_destroy(proc_t *p) 14726 { 14727 dtrace_helpers_t *help; 14728 dtrace_vstate_t *vstate; 14729 #if defined(sun) 14730 proc_t *p = curproc; 14731 #endif 14732 int i; 14733 14734 mutex_enter(&dtrace_lock); 14735 14736 ASSERT(p->p_dtrace_helpers != NULL); 14737 ASSERT(dtrace_helpers > 0); 14738 14739 help = p->p_dtrace_helpers; 14740 vstate = &help->dthps_vstate; 14741 14742 /* 14743 * We're now going to lose the help from this process. 14744 */ 14745 p->p_dtrace_helpers = NULL; 14746 dtrace_sync(); 14747 14748 /* 14749 * Destroy the helper actions. 14750 */ 14751 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14752 dtrace_helper_action_t *h, *next; 14753 14754 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14755 next = h->dtha_next; 14756 dtrace_helper_action_destroy(h, vstate); 14757 h = next; 14758 } 14759 } 14760 14761 mutex_exit(&dtrace_lock); 14762 14763 /* 14764 * Destroy the helper providers.
14765 */ 14766 if (help->dthps_maxprovs > 0) { 14767 mutex_enter(&dtrace_meta_lock); 14768 if (dtrace_meta_pid != NULL) { 14769 ASSERT(dtrace_deferred_pid == NULL); 14770 14771 for (i = 0; i < help->dthps_nprovs; i++) { 14772 dtrace_helper_provider_remove( 14773 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14774 } 14775 } else { 14776 mutex_enter(&dtrace_lock); 14777 ASSERT(help->dthps_deferred == 0 || 14778 help->dthps_next != NULL || 14779 help->dthps_prev != NULL || 14780 help == dtrace_deferred_pid); 14781 14782 /* 14783 * Remove the helper from the deferred list. 14784 */ 14785 if (help->dthps_next != NULL) 14786 help->dthps_next->dthps_prev = help->dthps_prev; 14787 if (help->dthps_prev != NULL) 14788 help->dthps_prev->dthps_next = help->dthps_next; 14789 if (dtrace_deferred_pid == help) { 14790 dtrace_deferred_pid = help->dthps_next; 14791 ASSERT(help->dthps_prev == NULL); 14792 } 14793 14794 mutex_exit(&dtrace_lock); 14795 } 14796 14797 mutex_exit(&dtrace_meta_lock); 14798 14799 for (i = 0; i < help->dthps_nprovs; i++) { 14800 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14801 } 14802 14803 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14804 sizeof (dtrace_helper_provider_t *)); 14805 } 14806 14807 mutex_enter(&dtrace_lock); 14808 14809 dtrace_vstate_fini(&help->dthps_vstate); 14810 kmem_free(help->dthps_actions, 14811 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14812 kmem_free(help, sizeof (dtrace_helpers_t)); 14813 14814 --dtrace_helpers; 14815 mutex_exit(&dtrace_lock); 14816 } 14817 14818 #if defined(sun) 14819 static 14820 #endif 14821 void 14822 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14823 { 14824 dtrace_helpers_t *help, *newhelp; 14825 dtrace_helper_action_t *helper, *new, *last; 14826 dtrace_difo_t *dp; 14827 dtrace_vstate_t *vstate; 14828 int i, j, sz, hasprovs = 0; 14829 14830 mutex_enter(&dtrace_lock); 14831 ASSERT(from->p_dtrace_helpers != NULL); 14832 ASSERT(dtrace_helpers > 0); 14833 14834 help = from->p_dtrace_helpers; 14835 newhelp = dtrace_helpers_create(to); 14836 ASSERT(to->p_dtrace_helpers != NULL); 14837 14838 newhelp->dthps_generation = help->dthps_generation; 14839 vstate = &newhelp->dthps_vstate; 14840 14841 /* 14842 * Duplicate the helper actions. 14843 */ 14844 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14845 if ((helper = help->dthps_actions[i]) == NULL) 14846 continue; 14847 14848 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14849 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14850 KM_SLEEP); 14851 new->dtha_generation = helper->dtha_generation; 14852 14853 if ((dp = helper->dtha_predicate) != NULL) { 14854 dp = dtrace_difo_duplicate(dp, vstate); 14855 new->dtha_predicate = dp; 14856 } 14857 14858 new->dtha_nactions = helper->dtha_nactions; 14859 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14860 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14861 14862 for (j = 0; j < new->dtha_nactions; j++) { 14863 dtrace_difo_t *dp = helper->dtha_actions[j]; 14864 14865 ASSERT(dp != NULL); 14866 dp = dtrace_difo_duplicate(dp, vstate); 14867 new->dtha_actions[j] = dp; 14868 } 14869 14870 if (last != NULL) { 14871 last->dtha_next = new; 14872 } else { 14873 newhelp->dthps_actions[i] = new; 14874 } 14875 14876 last = new; 14877 } 14878 } 14879 14880 /* 14881 * Duplicate the helper providers and register them with the 14882 * DTrace framework. 
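 *
 * Note that the provider descriptions themselves are shared rather
 * than copied: each entry in the child's dthps_provs[] points at the
 * parent's dtrace_helper_provider_t and takes a reference via
 * dthp_ref++, so the underlying DOF is destroyed only when the last
 * referencing process releases it in dtrace_helper_provider_destroy().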
14883 */ 14884 if (help->dthps_nprovs > 0) { 14885 newhelp->dthps_nprovs = help->dthps_nprovs; 14886 newhelp->dthps_maxprovs = help->dthps_nprovs; 14887 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14888 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14889 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14890 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14891 newhelp->dthps_provs[i]->dthp_ref++; 14892 } 14893 14894 hasprovs = 1; 14895 } 14896 14897 mutex_exit(&dtrace_lock); 14898 14899 if (hasprovs) 14900 dtrace_helper_provider_register(to, newhelp, NULL); 14901 } 14902 14903 #if defined(sun) 14904 /* 14905 * DTrace Hook Functions 14906 */ 14907 static void 14908 dtrace_module_loaded(modctl_t *ctl) 14909 { 14910 dtrace_provider_t *prv; 14911 14912 mutex_enter(&dtrace_provider_lock); 14913 mutex_enter(&mod_lock); 14914 14915 ASSERT(ctl->mod_busy); 14916 14917 /* 14918 * We're going to call each provider's per-module provide operation 14919 * specifying only this module. 14920 */ 14921 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14922 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14923 14924 mutex_exit(&mod_lock); 14925 mutex_exit(&dtrace_provider_lock); 14926 14927 /* 14928 * If we have any retained enablings, we need to match against them. 14929 * Enabling probes requires that cpu_lock be held, and we cannot hold 14930 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14931 * module. (In particular, this happens when loading scheduling 14932 * classes.) So if we have any retained enablings, we need to dispatch 14933 * our task queue to do the match for us. 14934 */ 14935 mutex_enter(&dtrace_lock); 14936 14937 if (dtrace_retained == NULL) { 14938 mutex_exit(&dtrace_lock); 14939 return; 14940 } 14941 14942 (void) taskq_dispatch(dtrace_taskq, 14943 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 14944 14945 mutex_exit(&dtrace_lock); 14946 14947 /* 14948 * And now, for a little heuristic sleaze: in general, we want to 14949 * match modules as soon as they load. However, we cannot guarantee 14950 * this, because it would lead us to the lock ordering violation 14951 * outlined above. The common case, of course, is that cpu_lock is 14952 * _not_ held -- so we delay here for a clock tick, hoping that that's 14953 * long enough for the task queue to do its work. If it's not, it's 14954 * not a serious problem -- it just means that the module that we 14955 * just loaded may not be immediately instrumentable. 14956 */ 14957 delay(1); 14958 } 14959 14960 static void 14961 dtrace_module_unloaded(modctl_t *ctl) 14962 { 14963 dtrace_probe_t template, *probe, *first, *next; 14964 dtrace_provider_t *prov; 14965 14966 template.dtpr_mod = ctl->mod_modname; 14967 14968 mutex_enter(&dtrace_provider_lock); 14969 mutex_enter(&mod_lock); 14970 mutex_enter(&dtrace_lock); 14971 14972 if (dtrace_bymod == NULL) { 14973 /* 14974 * The DTrace module is loaded (obviously) but not attached; 14975 * we don't have any work to do. 14976 */ 14977 mutex_exit(&dtrace_provider_lock); 14978 mutex_exit(&mod_lock); 14979 mutex_exit(&dtrace_lock); 14980 return; 14981 } 14982 14983 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14984 probe != NULL; probe = probe->dtpr_nextmod) { 14985 if (probe->dtpr_ecb != NULL) { 14986 mutex_exit(&dtrace_provider_lock); 14987 mutex_exit(&mod_lock); 14988 mutex_exit(&dtrace_lock); 14989 14990 /* 14991 * This shouldn't _actually_ be possible -- we're 14992 * unloading a module that has an enabled probe in it.
14993 * (It's normally up to the provider to make sure that 14994 * this can't happen.) However, because dtps_enable() 14995 * doesn't have a failure mode, there can be an 14996 * enable/unload race. Upshot: we don't want to 14997 * assert, but we're not going to disable the 14998 * probe, either. 14999 */ 15000 if (dtrace_err_verbose) { 15001 cmn_err(CE_WARN, "unloaded module '%s' had " 15002 "enabled probes", ctl->mod_modname); 15003 } 15004 15005 return; 15006 } 15007 } 15008 15009 probe = first; 15010 15011 for (first = NULL; probe != NULL; probe = next) { 15012 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15013 15014 dtrace_probes[probe->dtpr_id - 1] = NULL; 15015 15016 next = probe->dtpr_nextmod; 15017 dtrace_hash_remove(dtrace_bymod, probe); 15018 dtrace_hash_remove(dtrace_byfunc, probe); 15019 dtrace_hash_remove(dtrace_byname, probe); 15020 15021 if (first == NULL) { 15022 first = probe; 15023 probe->dtpr_nextmod = NULL; 15024 } else { 15025 probe->dtpr_nextmod = first; 15026 first = probe; 15027 } 15028 } 15029 15030 /* 15031 * We've removed all of the module's probes from the hash chains and 15032 * from the probe array. Now issue a dtrace_sync() to be sure that 15033 * everyone has cleared out from any probe array processing. 15034 */ 15035 dtrace_sync(); 15036 15037 for (probe = first; probe != NULL; probe = first) { 15038 first = probe->dtpr_nextmod; 15039 prov = probe->dtpr_provider; 15040 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15041 probe->dtpr_arg); 15042 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15043 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15044 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15045 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15046 kmem_free(probe, sizeof (dtrace_probe_t)); 15047 } 15048 15049 mutex_exit(&dtrace_lock); 15050 mutex_exit(&mod_lock); 15051 mutex_exit(&dtrace_provider_lock); 15052 } 15053 15054 static void 15055 dtrace_suspend(void) 15056 { 15057 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15058 } 15059 15060 static void 15061 dtrace_resume(void) 15062 { 15063 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15064 } 15065 #endif 15066 15067 static int 15068 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15069 { 15070 ASSERT(MUTEX_HELD(&cpu_lock)); 15071 mutex_enter(&dtrace_lock); 15072 15073 switch (what) { 15074 case CPU_CONFIG: { 15075 dtrace_state_t *state; 15076 dtrace_optval_t *opt, rs, c; 15077 15078 /* 15079 * For now, we only allocate a new buffer for anonymous state. 15080 */ 15081 if ((state = dtrace_anon.dta_state) == NULL) 15082 break; 15083 15084 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15085 break; 15086 15087 opt = state->dts_options; 15088 c = opt[DTRACEOPT_CPU]; 15089 15090 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15091 break; 15092 15093 /* 15094 * Regardless of what the actual policy is, we're going to 15095 * temporarily set our resize policy to be manual. We're 15096 * also going to temporarily set our CPU option to denote 15097 * the newly configured CPU. 15098 */ 15099 rs = opt[DTRACEOPT_BUFRESIZE]; 15100 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15101 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15102 15103 (void) dtrace_state_buffers(state); 15104 15105 opt[DTRACEOPT_BUFRESIZE] = rs; 15106 opt[DTRACEOPT_CPU] = c; 15107 15108 break; 15109 } 15110 15111 case CPU_UNCONFIG: 15112 /* 15113 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 15114 * buffer will be freed when the consumer exits.) 15115 */ 15116 break; 15117 15118 default: 15119 break; 15120 } 15121 15122 mutex_exit(&dtrace_lock); 15123 return (0); 15124 } 15125 15126 #if defined(sun) 15127 static void 15128 dtrace_cpu_setup_initial(processorid_t cpu) 15129 { 15130 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15131 } 15132 #endif 15133 15134 static void 15135 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15136 { 15137 if (dtrace_toxranges >= dtrace_toxranges_max) { 15138 int osize, nsize; 15139 dtrace_toxrange_t *range; 15140 15141 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15142 15143 if (osize == 0) { 15144 ASSERT(dtrace_toxrange == NULL); 15145 ASSERT(dtrace_toxranges_max == 0); 15146 dtrace_toxranges_max = 1; 15147 } else { 15148 dtrace_toxranges_max <<= 1; 15149 } 15150 15151 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15152 range = kmem_zalloc(nsize, KM_SLEEP); 15153 15154 if (dtrace_toxrange != NULL) { 15155 ASSERT(osize != 0); 15156 bcopy(dtrace_toxrange, range, osize); 15157 kmem_free(dtrace_toxrange, osize); 15158 } 15159 15160 dtrace_toxrange = range; 15161 } 15162 15163 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15164 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15165 15166 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15167 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15168 dtrace_toxranges++; 15169 } 15170 15171 /* 15172 * DTrace Driver Cookbook Functions 15173 */ 15174 #if defined(sun) 15175 /*ARGSUSED*/ 15176 static int 15177 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15178 { 15179 dtrace_provider_id_t id; 15180 dtrace_state_t *state = NULL; 15181 dtrace_enabling_t *enab; 15182 15183 mutex_enter(&cpu_lock); 15184 mutex_enter(&dtrace_provider_lock); 15185 mutex_enter(&dtrace_lock); 15186 15187 if (ddi_soft_state_init(&dtrace_softstate, 15188 sizeof (dtrace_state_t), 0) != 0) { 15189 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15190 mutex_exit(&cpu_lock); 15191 mutex_exit(&dtrace_provider_lock); 15192 mutex_exit(&dtrace_lock); 15193 return (DDI_FAILURE); 15194 } 15195 15196 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15197 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15198 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15199 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15200 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15201 ddi_remove_minor_node(devi, NULL); 15202 ddi_soft_state_fini(&dtrace_softstate); 15203 mutex_exit(&cpu_lock); 15204 mutex_exit(&dtrace_provider_lock); 15205 mutex_exit(&dtrace_lock); 15206 return (DDI_FAILURE); 15207 } 15208 15209 ddi_report_dev(devi); 15210 dtrace_devi = devi; 15211 15212 dtrace_modload = dtrace_module_loaded; 15213 dtrace_modunload = dtrace_module_unloaded; 15214 dtrace_cpu_init = dtrace_cpu_setup_initial; 15215 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15216 dtrace_helpers_fork = dtrace_helpers_duplicate; 15217 dtrace_cpustart_init = dtrace_suspend; 15218 dtrace_cpustart_fini = dtrace_resume; 15219 dtrace_debugger_init = dtrace_suspend; 15220 dtrace_debugger_fini = dtrace_resume; 15221 15222 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15223 15224 ASSERT(MUTEX_HELD(&cpu_lock)); 15225 15226 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15227 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15228 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15229 UINT32_MAX - 
DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15230 VM_SLEEP | VMC_IDENTIFIER); 15231 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15232 1, INT_MAX, 0); 15233 15234 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15235 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15236 NULL, NULL, NULL, NULL, NULL, 0); 15237 15238 ASSERT(MUTEX_HELD(&cpu_lock)); 15239 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15240 offsetof(dtrace_probe_t, dtpr_nextmod), 15241 offsetof(dtrace_probe_t, dtpr_prevmod)); 15242 15243 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15244 offsetof(dtrace_probe_t, dtpr_nextfunc), 15245 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15246 15247 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15248 offsetof(dtrace_probe_t, dtpr_nextname), 15249 offsetof(dtrace_probe_t, dtpr_prevname)); 15250 15251 if (dtrace_retain_max < 1) { 15252 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15253 "setting to 1", dtrace_retain_max); 15254 dtrace_retain_max = 1; 15255 } 15256 15257 /* 15258 * Now discover our toxic ranges. 15259 */ 15260 dtrace_toxic_ranges(dtrace_toxrange_add); 15261 15262 /* 15263 * Before we register ourselves as a provider to our own framework, 15264 * we would like to assert that dtrace_provider is NULL -- but that's 15265 * not true if we were loaded as a dependency of a DTrace provider. 15266 * Once we've registered, we can assert that dtrace_provider is our 15267 * pseudo provider. 15268 */ 15269 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15270 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15271 15272 ASSERT(dtrace_provider != NULL); 15273 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15274 15275 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15276 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15277 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15278 dtrace_provider, NULL, NULL, "END", 0, NULL); 15279 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15280 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15281 15282 dtrace_anon_property(); 15283 mutex_exit(&cpu_lock); 15284 15285 /* 15286 * If DTrace helper tracing is enabled, we need to allocate the 15287 * trace buffer and initialize the values. 15288 */ 15289 if (dtrace_helptrace_enabled) { 15290 ASSERT(dtrace_helptrace_buffer == NULL); 15291 dtrace_helptrace_buffer = 15292 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15293 dtrace_helptrace_next = 0; 15294 } 15295 15296 /* 15297 * If there are already providers, we must ask them to provide their 15298 * probes, and then match any anonymous enabling against them. Note 15299 * that there should be no other retained enablings at this time: 15300 * the only retained enablings at this time should be the anonymous 15301 * enabling. 15302 */ 15303 if (dtrace_anon.dta_enabling != NULL) { 15304 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15305 15306 dtrace_enabling_provide(NULL); 15307 state = dtrace_anon.dta_state; 15308 15309 /* 15310 * We couldn't hold cpu_lock across the above call to 15311 * dtrace_enabling_provide(), but we must hold it to actually 15312 * enable the probes. We have to drop all of our locks, pick 15313 * up cpu_lock, and regain our locks before matching the 15314 * retained anonymous enabling. 
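 *
 * (The acquisition order is the same one used at the top of this
 * function -- cpu_lock, then dtrace_provider_lock, then dtrace_lock --
 * which is why everything must be dropped before cpu_lock can be
 * picked back up.)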
15315 */ 15316 mutex_exit(&dtrace_lock); 15317 mutex_exit(&dtrace_provider_lock); 15318 15319 mutex_enter(&cpu_lock); 15320 mutex_enter(&dtrace_provider_lock); 15321 mutex_enter(&dtrace_lock); 15322 15323 if ((enab = dtrace_anon.dta_enabling) != NULL) 15324 (void) dtrace_enabling_match(enab, NULL); 15325 15326 mutex_exit(&cpu_lock); 15327 } 15328 15329 mutex_exit(&dtrace_lock); 15330 mutex_exit(&dtrace_provider_lock); 15331 15332 if (state != NULL) { 15333 /* 15334 * If we created any anonymous state, set it going now. 15335 */ 15336 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15337 } 15338 15339 return (DDI_SUCCESS); 15340 } 15341 #endif 15342 15343 #if !defined(sun) 15344 #if __FreeBSD_version >= 800039 15345 static void 15346 dtrace_dtr(void *data __unused) 15347 { 15348 } 15349 #endif 15350 #endif 15351 15352 /*ARGSUSED*/ 15353 static int 15354 #if defined(sun) 15355 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15356 #else 15357 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15358 #endif 15359 { 15360 dtrace_state_t *state; 15361 uint32_t priv; 15362 uid_t uid; 15363 zoneid_t zoneid; 15364 15365 #if defined(sun) 15366 if (getminor(*devp) == DTRACEMNRN_HELPER) 15367 return (0); 15368 15369 /* 15370 * If this wasn't an open with the "helper" minor, then it must be 15371 * the "dtrace" minor. 15372 */ 15373 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15374 #else 15375 cred_t *cred_p = NULL; 15376 15377 #if __FreeBSD_version < 800039 15378 /* 15379 * The first minor device is the one that is cloned so there is 15380 * nothing more to do here. 15381 */ 15382 if (dev2unit(dev) == 0) 15383 return 0; 15384 15385 /* 15386 * Devices are cloned, so if the DTrace state has already 15387 * been allocated, that means this device belongs to a 15388 * different client. Each client should open '/dev/dtrace' 15389 * to get a cloned device. 15390 */ 15391 if (dev->si_drv1 != NULL) 15392 return (EBUSY); 15393 #endif 15394 15395 cred_p = dev->si_cred; 15396 #endif 15397 15398 /* 15399 * If no DTRACE_PRIV_* bits are set in the credential, then the 15400 * caller lacks sufficient permission to do anything with DTrace. 15401 */ 15402 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15403 if (priv == DTRACE_PRIV_NONE) { 15404 #if !defined(sun) 15405 #if __FreeBSD_version < 800039 15406 /* Destroy the cloned device. */ 15407 destroy_dev(dev); 15408 #endif 15409 #endif 15410 15411 return (EACCES); 15412 } 15413 15414 /* 15415 * Ask all providers to provide all their probes. 15416 */ 15417 mutex_enter(&dtrace_provider_lock); 15418 dtrace_probe_provide(NULL, NULL); 15419 mutex_exit(&dtrace_provider_lock); 15420 15421 mutex_enter(&cpu_lock); 15422 mutex_enter(&dtrace_lock); 15423 dtrace_opens++; 15424 dtrace_membar_producer(); 15425 15426 #if defined(sun) 15427 /* 15428 * If the kernel debugger is active (that is, if the kernel debugger 15429 * modified text in some way), we won't allow the open. 
15430 */ 15431 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15432 dtrace_opens--; 15433 mutex_exit(&cpu_lock); 15434 mutex_exit(&dtrace_lock); 15435 return (EBUSY); 15436 } 15437 15438 state = dtrace_state_create(devp, cred_p); 15439 #else 15440 state = dtrace_state_create(dev); 15441 #if __FreeBSD_version < 800039 15442 dev->si_drv1 = state; 15443 #else 15444 devfs_set_cdevpriv(state, dtrace_dtr); 15445 #endif 15446 #endif 15447 15448 mutex_exit(&cpu_lock); 15449 15450 if (state == NULL) { 15451 #if defined(sun) 15452 if (--dtrace_opens == 0) 15453 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15454 #else 15455 --dtrace_opens; 15456 #endif 15457 mutex_exit(&dtrace_lock); 15458 #if !defined(sun) 15459 #if __FreeBSD_version < 800039 15460 /* Destroy the cloned device. */ 15461 destroy_dev(dev); 15462 #endif 15463 #endif 15464 return (EAGAIN); 15465 } 15466 15467 mutex_exit(&dtrace_lock); 15468 15469 return (0); 15470 } 15471 15472 /*ARGSUSED*/ 15473 static int 15474 #if defined(sun) 15475 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15476 #else 15477 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15478 #endif 15479 { 15480 #if defined(sun) 15481 minor_t minor = getminor(dev); 15482 dtrace_state_t *state; 15483 15484 if (minor == DTRACEMNRN_HELPER) 15485 return (0); 15486 15487 state = ddi_get_soft_state(dtrace_softstate, minor); 15488 #else 15489 #if __FreeBSD_version < 800039 15490 dtrace_state_t *state = dev->si_drv1; 15491 15492 /* Check if this is not a cloned device. */ 15493 if (dev2unit(dev) == 0) 15494 return (0); 15495 #else 15496 dtrace_state_t *state; 15497 devfs_get_cdevpriv((void **) &state); 15498 #endif 15499 15500 #endif 15501 15502 mutex_enter(&cpu_lock); 15503 mutex_enter(&dtrace_lock); 15504 15505 if (state != NULL) { 15506 if (state->dts_anon) { 15507 /* 15508 * There is anonymous state. Destroy that first. 15509 */ 15510 ASSERT(dtrace_anon.dta_state == NULL); 15511 dtrace_state_destroy(state->dts_anon); 15512 } 15513 15514 dtrace_state_destroy(state); 15515 15516 #if !defined(sun) 15517 kmem_free(state, 0); 15518 #if __FreeBSD_version < 800039 15519 dev->si_drv1 = NULL; 15520 #else 15521 devfs_clear_cdevpriv(); 15522 #endif 15523 #endif 15524 } 15525 15526 ASSERT(dtrace_opens > 0); 15527 #if defined(sun) 15528 if (--dtrace_opens == 0) 15529 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15530 #else 15531 --dtrace_opens; 15532 #endif 15533 15534 mutex_exit(&dtrace_lock); 15535 mutex_exit(&cpu_lock); 15536 15537 #if __FreeBSD_version < 800039 15538 /* Schedule this cloned device to be destroyed. */ 15539 destroy_dev_sched(dev); 15540 #endif 15541 15542 return (0); 15543 } 15544 15545 #if defined(sun) 15546 /*ARGSUSED*/ 15547 static int 15548 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15549 { 15550 int rval; 15551 dof_helper_t help, *dhp = NULL; 15552 15553 switch (cmd) { 15554 case DTRACEHIOC_ADDDOF: 15555 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15556 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15557 return (EFAULT); 15558 } 15559 15560 dhp = &help; 15561 arg = (intptr_t)help.dofhp_dof; 15562 /*FALLTHROUGH*/ 15563 15564 case DTRACEHIOC_ADD: { 15565 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15566 15567 if (dof == NULL) 15568 return (rval); 15569 15570 mutex_enter(&dtrace_lock); 15571 15572 /* 15573 * dtrace_helper_slurp() takes responsibility for the dof -- 15574 * it may free it now or it may save it and free it later. 
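	 * Either way, the dof must not be referenced again here once the
	 * call has been made.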
15575 */ 15576 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15577 *rv = rval; 15578 rval = 0; 15579 } else { 15580 rval = EINVAL; 15581 } 15582 15583 mutex_exit(&dtrace_lock); 15584 return (rval); 15585 } 15586 15587 case DTRACEHIOC_REMOVE: { 15588 mutex_enter(&dtrace_lock); 15589 rval = dtrace_helper_destroygen(arg); 15590 mutex_exit(&dtrace_lock); 15591 15592 return (rval); 15593 } 15594 15595 default: 15596 break; 15597 } 15598 15599 return (ENOTTY); 15600 } 15601 15602 /*ARGSUSED*/ 15603 static int 15604 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15605 { 15606 minor_t minor = getminor(dev); 15607 dtrace_state_t *state; 15608 int rval; 15609 15610 if (minor == DTRACEMNRN_HELPER) 15611 return (dtrace_ioctl_helper(cmd, arg, rv)); 15612 15613 state = ddi_get_soft_state(dtrace_softstate, minor); 15614 15615 if (state->dts_anon) { 15616 ASSERT(dtrace_anon.dta_state == NULL); 15617 state = state->dts_anon; 15618 } 15619 15620 switch (cmd) { 15621 case DTRACEIOC_PROVIDER: { 15622 dtrace_providerdesc_t pvd; 15623 dtrace_provider_t *pvp; 15624 15625 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15626 return (EFAULT); 15627 15628 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15629 mutex_enter(&dtrace_provider_lock); 15630 15631 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15632 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15633 break; 15634 } 15635 15636 mutex_exit(&dtrace_provider_lock); 15637 15638 if (pvp == NULL) 15639 return (ESRCH); 15640 15641 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15642 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15643 15644 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15645 return (EFAULT); 15646 15647 return (0); 15648 } 15649 15650 case DTRACEIOC_EPROBE: { 15651 dtrace_eprobedesc_t epdesc; 15652 dtrace_ecb_t *ecb; 15653 dtrace_action_t *act; 15654 void *buf; 15655 size_t size; 15656 uintptr_t dest; 15657 int nrecs; 15658 15659 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15660 return (EFAULT); 15661 15662 mutex_enter(&dtrace_lock); 15663 15664 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15665 mutex_exit(&dtrace_lock); 15666 return (EINVAL); 15667 } 15668 15669 if (ecb->dte_probe == NULL) { 15670 mutex_exit(&dtrace_lock); 15671 return (EINVAL); 15672 } 15673 15674 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15675 epdesc.dtepd_uarg = ecb->dte_uarg; 15676 epdesc.dtepd_size = ecb->dte_size; 15677 15678 nrecs = epdesc.dtepd_nrecs; 15679 epdesc.dtepd_nrecs = 0; 15680 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15681 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15682 continue; 15683 15684 epdesc.dtepd_nrecs++; 15685 } 15686 15687 /* 15688 * Now that we have the size, we need to allocate a temporary 15689 * buffer in which to store the complete description. We need 15690 * the temporary buffer to be able to drop dtrace_lock() 15691 * across the copyout(), below. 
15692 */ 15693 size = sizeof (dtrace_eprobedesc_t) + 15694 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15695 15696 buf = kmem_alloc(size, KM_SLEEP); 15697 dest = (uintptr_t)buf; 15698 15699 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15700 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15701 15702 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15703 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15704 continue; 15705 15706 if (nrecs-- == 0) 15707 break; 15708 15709 bcopy(&act->dta_rec, (void *)dest, 15710 sizeof (dtrace_recdesc_t)); 15711 dest += sizeof (dtrace_recdesc_t); 15712 } 15713 15714 mutex_exit(&dtrace_lock); 15715 15716 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15717 kmem_free(buf, size); 15718 return (EFAULT); 15719 } 15720 15721 kmem_free(buf, size); 15722 return (0); 15723 } 15724 15725 case DTRACEIOC_AGGDESC: { 15726 dtrace_aggdesc_t aggdesc; 15727 dtrace_action_t *act; 15728 dtrace_aggregation_t *agg; 15729 int nrecs; 15730 uint32_t offs; 15731 dtrace_recdesc_t *lrec; 15732 void *buf; 15733 size_t size; 15734 uintptr_t dest; 15735 15736 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15737 return (EFAULT); 15738 15739 mutex_enter(&dtrace_lock); 15740 15741 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15742 mutex_exit(&dtrace_lock); 15743 return (EINVAL); 15744 } 15745 15746 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15747 15748 nrecs = aggdesc.dtagd_nrecs; 15749 aggdesc.dtagd_nrecs = 0; 15750 15751 offs = agg->dtag_base; 15752 lrec = &agg->dtag_action.dta_rec; 15753 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15754 15755 for (act = agg->dtag_first; ; act = act->dta_next) { 15756 ASSERT(act->dta_intuple || 15757 DTRACEACT_ISAGG(act->dta_kind)); 15758 15759 /* 15760 * If this action has a record size of zero, it 15761 * denotes an argument to the aggregating action. 15762 * Because the presence of this record doesn't (or 15763 * shouldn't) affect the way the data is interpreted, 15764 * we don't copy it out to save user-level the 15765 * confusion of dealing with a zero-length record. 15766 */ 15767 if (act->dta_rec.dtrd_size == 0) { 15768 ASSERT(agg->dtag_hasarg); 15769 continue; 15770 } 15771 15772 aggdesc.dtagd_nrecs++; 15773 15774 if (act == &agg->dtag_action) 15775 break; 15776 } 15777 15778 /* 15779 * Now that we have the size, we need to allocate a temporary 15780 * buffer in which to store the complete description. We need 15781 * the temporary buffer to be able to drop dtrace_lock() 15782 * across the copyout(), below. 15783 */ 15784 size = sizeof (dtrace_aggdesc_t) + 15785 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15786 15787 buf = kmem_alloc(size, KM_SLEEP); 15788 dest = (uintptr_t)buf; 15789 15790 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15791 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15792 15793 for (act = agg->dtag_first; ; act = act->dta_next) { 15794 dtrace_recdesc_t rec = act->dta_rec; 15795 15796 /* 15797 * See the comment in the above loop for why we pass 15798 * over zero-length records. 
15799 */ 15800 if (rec.dtrd_size == 0) { 15801 ASSERT(agg->dtag_hasarg); 15802 continue; 15803 } 15804 15805 if (nrecs-- == 0) 15806 break; 15807 15808 rec.dtrd_offset -= offs; 15809 bcopy(&rec, (void *)dest, sizeof (rec)); 15810 dest += sizeof (dtrace_recdesc_t); 15811 15812 if (act == &agg->dtag_action) 15813 break; 15814 } 15815 15816 mutex_exit(&dtrace_lock); 15817 15818 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15819 kmem_free(buf, size); 15820 return (EFAULT); 15821 } 15822 15823 kmem_free(buf, size); 15824 return (0); 15825 } 15826 15827 case DTRACEIOC_ENABLE: { 15828 dof_hdr_t *dof; 15829 dtrace_enabling_t *enab = NULL; 15830 dtrace_vstate_t *vstate; 15831 int err = 0; 15832 15833 *rv = 0; 15834 15835 /* 15836 * If a NULL argument has been passed, we take this as our 15837 * cue to reevaluate our enablings. 15838 */ 15839 if (arg == NULL) { 15840 dtrace_enabling_matchall(); 15841 15842 return (0); 15843 } 15844 15845 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15846 return (rval); 15847 15848 mutex_enter(&cpu_lock); 15849 mutex_enter(&dtrace_lock); 15850 vstate = &state->dts_vstate; 15851 15852 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15853 mutex_exit(&dtrace_lock); 15854 mutex_exit(&cpu_lock); 15855 dtrace_dof_destroy(dof); 15856 return (EBUSY); 15857 } 15858 15859 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15860 mutex_exit(&dtrace_lock); 15861 mutex_exit(&cpu_lock); 15862 dtrace_dof_destroy(dof); 15863 return (EINVAL); 15864 } 15865 15866 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15867 dtrace_enabling_destroy(enab); 15868 mutex_exit(&dtrace_lock); 15869 mutex_exit(&cpu_lock); 15870 dtrace_dof_destroy(dof); 15871 return (rval); 15872 } 15873 15874 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15875 err = dtrace_enabling_retain(enab); 15876 } else { 15877 dtrace_enabling_destroy(enab); 15878 } 15879 15880 mutex_exit(&cpu_lock); 15881 mutex_exit(&dtrace_lock); 15882 dtrace_dof_destroy(dof); 15883 15884 return (err); 15885 } 15886 15887 case DTRACEIOC_REPLICATE: { 15888 dtrace_repldesc_t desc; 15889 dtrace_probedesc_t *match = &desc.dtrpd_match; 15890 dtrace_probedesc_t *create = &desc.dtrpd_create; 15891 int err; 15892 15893 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15894 return (EFAULT); 15895 15896 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15897 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15898 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15899 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15900 15901 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15902 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15903 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15904 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15905 15906 mutex_enter(&dtrace_lock); 15907 err = dtrace_enabling_replicate(state, match, create); 15908 mutex_exit(&dtrace_lock); 15909 15910 return (err); 15911 } 15912 15913 case DTRACEIOC_PROBEMATCH: 15914 case DTRACEIOC_PROBES: { 15915 dtrace_probe_t *probe = NULL; 15916 dtrace_probedesc_t desc; 15917 dtrace_probekey_t pkey; 15918 dtrace_id_t i; 15919 int m = 0; 15920 uint32_t priv; 15921 uid_t uid; 15922 zoneid_t zoneid; 15923 15924 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15925 return (EFAULT); 15926 15927 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15928 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15929 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15930 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15931 15932 /* 15933 * Before we attempt to 
match this probe, we want to give 15934 * all providers the opportunity to provide it. 15935 */ 15936 if (desc.dtpd_id == DTRACE_IDNONE) { 15937 mutex_enter(&dtrace_provider_lock); 15938 dtrace_probe_provide(&desc, NULL); 15939 mutex_exit(&dtrace_provider_lock); 15940 desc.dtpd_id++; 15941 } 15942 15943 if (cmd == DTRACEIOC_PROBEMATCH) { 15944 dtrace_probekey(&desc, &pkey); 15945 pkey.dtpk_id = DTRACE_IDNONE; 15946 } 15947 15948 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15949 15950 mutex_enter(&dtrace_lock); 15951 15952 if (cmd == DTRACEIOC_PROBEMATCH) { 15953 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15954 if ((probe = dtrace_probes[i - 1]) != NULL && 15955 (m = dtrace_match_probe(probe, &pkey, 15956 priv, uid, zoneid)) != 0) 15957 break; 15958 } 15959 15960 if (m < 0) { 15961 mutex_exit(&dtrace_lock); 15962 return (EINVAL); 15963 } 15964 15965 } else { 15966 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15967 if ((probe = dtrace_probes[i - 1]) != NULL && 15968 dtrace_match_priv(probe, priv, uid, zoneid)) 15969 break; 15970 } 15971 } 15972 15973 if (probe == NULL) { 15974 mutex_exit(&dtrace_lock); 15975 return (ESRCH); 15976 } 15977 15978 dtrace_probe_description(probe, &desc); 15979 mutex_exit(&dtrace_lock); 15980 15981 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15982 return (EFAULT); 15983 15984 return (0); 15985 } 15986 15987 case DTRACEIOC_PROBEARG: { 15988 dtrace_argdesc_t desc; 15989 dtrace_probe_t *probe; 15990 dtrace_provider_t *prov; 15991 15992 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15993 return (EFAULT); 15994 15995 if (desc.dtargd_id == DTRACE_IDNONE) 15996 return (EINVAL); 15997 15998 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15999 return (EINVAL); 16000 16001 mutex_enter(&dtrace_provider_lock); 16002 mutex_enter(&mod_lock); 16003 mutex_enter(&dtrace_lock); 16004 16005 if (desc.dtargd_id > dtrace_nprobes) { 16006 mutex_exit(&dtrace_lock); 16007 mutex_exit(&mod_lock); 16008 mutex_exit(&dtrace_provider_lock); 16009 return (EINVAL); 16010 } 16011 16012 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16013 mutex_exit(&dtrace_lock); 16014 mutex_exit(&mod_lock); 16015 mutex_exit(&dtrace_provider_lock); 16016 return (EINVAL); 16017 } 16018 16019 mutex_exit(&dtrace_lock); 16020 16021 prov = probe->dtpr_provider; 16022 16023 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16024 /* 16025 * There isn't any typed information for this probe. 16026 * Set the argument number to DTRACE_ARGNONE. 
16027 */ 16028 desc.dtargd_ndx = DTRACE_ARGNONE; 16029 } else { 16030 desc.dtargd_native[0] = '\0'; 16031 desc.dtargd_xlate[0] = '\0'; 16032 desc.dtargd_mapping = desc.dtargd_ndx; 16033 16034 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 16035 probe->dtpr_id, probe->dtpr_arg, &desc); 16036 } 16037 16038 mutex_exit(&mod_lock); 16039 mutex_exit(&dtrace_provider_lock); 16040 16041 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16042 return (EFAULT); 16043 16044 return (0); 16045 } 16046 16047 case DTRACEIOC_GO: { 16048 processorid_t cpuid; 16049 rval = dtrace_state_go(state, &cpuid); 16050 16051 if (rval != 0) 16052 return (rval); 16053 16054 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16055 return (EFAULT); 16056 16057 return (0); 16058 } 16059 16060 case DTRACEIOC_STOP: { 16061 processorid_t cpuid; 16062 16063 mutex_enter(&dtrace_lock); 16064 rval = dtrace_state_stop(state, &cpuid); 16065 mutex_exit(&dtrace_lock); 16066 16067 if (rval != 0) 16068 return (rval); 16069 16070 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16071 return (EFAULT); 16072 16073 return (0); 16074 } 16075 16076 case DTRACEIOC_DOFGET: { 16077 dof_hdr_t hdr, *dof; 16078 uint64_t len; 16079 16080 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 16081 return (EFAULT); 16082 16083 mutex_enter(&dtrace_lock); 16084 dof = dtrace_dof_create(state); 16085 mutex_exit(&dtrace_lock); 16086 16087 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 16088 rval = copyout(dof, (void *)arg, len); 16089 dtrace_dof_destroy(dof); 16090 16091 return (rval == 0 ? 0 : EFAULT); 16092 } 16093 16094 case DTRACEIOC_AGGSNAP: 16095 case DTRACEIOC_BUFSNAP: { 16096 dtrace_bufdesc_t desc; 16097 caddr_t cached; 16098 dtrace_buffer_t *buf; 16099 16100 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16101 return (EFAULT); 16102 16103 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16104 return (EINVAL); 16105 16106 mutex_enter(&dtrace_lock); 16107 16108 if (cmd == DTRACEIOC_BUFSNAP) { 16109 buf = &state->dts_buffer[desc.dtbd_cpu]; 16110 } else { 16111 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16112 } 16113 16114 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16115 size_t sz = buf->dtb_offset; 16116 16117 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16118 mutex_exit(&dtrace_lock); 16119 return (EBUSY); 16120 } 16121 16122 /* 16123 * If this buffer has already been consumed, we're 16124 * going to indicate that there's nothing left here 16125 * to consume. 16126 */ 16127 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16128 mutex_exit(&dtrace_lock); 16129 16130 desc.dtbd_size = 0; 16131 desc.dtbd_drops = 0; 16132 desc.dtbd_errors = 0; 16133 desc.dtbd_oldest = 0; 16134 sz = sizeof (desc); 16135 16136 if (copyout(&desc, (void *)arg, sz) != 0) 16137 return (EFAULT); 16138 16139 return (0); 16140 } 16141 16142 /* 16143 * If this is a ring buffer that has wrapped, we want 16144 * to copy the whole thing out. 
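		 * In that case the copy below covers dtb_size bytes rather
		 * than just dtb_offset, and dtbd_oldest (set below from
		 * dtb_xamot_offset) tells the consumer where in that buffer
		 * the oldest data begins.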
16145 */ 16146 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16147 dtrace_buffer_polish(buf); 16148 sz = buf->dtb_size; 16149 } 16150 16151 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16152 mutex_exit(&dtrace_lock); 16153 return (EFAULT); 16154 } 16155 16156 desc.dtbd_size = sz; 16157 desc.dtbd_drops = buf->dtb_drops; 16158 desc.dtbd_errors = buf->dtb_errors; 16159 desc.dtbd_oldest = buf->dtb_xamot_offset; 16160 16161 mutex_exit(&dtrace_lock); 16162 16163 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16164 return (EFAULT); 16165 16166 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16167 16168 return (0); 16169 } 16170 16171 if (buf->dtb_tomax == NULL) { 16172 ASSERT(buf->dtb_xamot == NULL); 16173 mutex_exit(&dtrace_lock); 16174 return (ENOENT); 16175 } 16176 16177 cached = buf->dtb_tomax; 16178 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16179 16180 dtrace_xcall(desc.dtbd_cpu, 16181 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16182 16183 state->dts_errors += buf->dtb_xamot_errors; 16184 16185 /* 16186 * If the buffers did not actually switch, then the cross call 16187 * did not take place -- presumably because the given CPU is 16188 * not in the ready set. If this is the case, we'll return 16189 * ENOENT. 16190 */ 16191 if (buf->dtb_tomax == cached) { 16192 ASSERT(buf->dtb_xamot != cached); 16193 mutex_exit(&dtrace_lock); 16194 return (ENOENT); 16195 } 16196 16197 ASSERT(cached == buf->dtb_xamot); 16198 16199 /* 16200 * We have our snapshot; now copy it out. 16201 */ 16202 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16203 buf->dtb_xamot_offset) != 0) { 16204 mutex_exit(&dtrace_lock); 16205 return (EFAULT); 16206 } 16207 16208 desc.dtbd_size = buf->dtb_xamot_offset; 16209 desc.dtbd_drops = buf->dtb_xamot_drops; 16210 desc.dtbd_errors = buf->dtb_xamot_errors; 16211 desc.dtbd_oldest = 0; 16212 16213 mutex_exit(&dtrace_lock); 16214 16215 /* 16216 * Finally, copy out the buffer description. 16217 */ 16218 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16219 return (EFAULT); 16220 16221 return (0); 16222 } 16223 16224 case DTRACEIOC_CONF: { 16225 dtrace_conf_t conf; 16226 16227 bzero(&conf, sizeof (conf)); 16228 conf.dtc_difversion = DIF_VERSION; 16229 conf.dtc_difintregs = DIF_DIR_NREGS; 16230 conf.dtc_diftupregs = DIF_DTR_NREGS; 16231 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16232 16233 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16234 return (EFAULT); 16235 16236 return (0); 16237 } 16238 16239 case DTRACEIOC_STATUS: { 16240 dtrace_status_t stat; 16241 dtrace_dstate_t *dstate; 16242 int i, j; 16243 uint64_t nerrs; 16244 16245 /* 16246 * See the comment in dtrace_state_deadman() for the reason 16247 * for setting dts_laststatus to INT64_MAX before setting 16248 * it to the correct value. 
	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
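
	/*
	 * Illustrative sketch (not part of the original source): for
	 * switching (non-ring, non-fill) buffers, a consumer snapshots one
	 * CPU at a time by naming the CPU in dtbd_cpu and pointing
	 * dtbd_data at a buffer at least as large as the per-CPU principal
	 * buffer:
	 *
	 *	dtrace_bufdesc_t desc;
	 *
	 *	bzero(&desc, sizeof (desc));
	 *	desc.dtbd_cpu = cpu;
	 *	desc.dtbd_data = mybuf;
	 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == -1)
	 *		err(1, "DTRACEIOC_BUFSNAP");
	 *
	 * On success, dtbd_size, dtbd_drops, dtbd_errors and dtbd_oldest
	 * describe the snapshot; ENOENT means the CPU had no buffer or was
	 * not available for the buffer-switching cross call.
	 */
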
	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}
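
	/*
	 * Illustrative sketch (not part of the original source): the format
	 * lookup is typically a two-pass exchange -- one call with
	 * dtfd_length of zero to learn the required length, and a second
	 * with a suitably sized dtfd_string to fetch the format itself:
	 *
	 *	dtrace_fmtdesc_t fmt;
	 *
	 *	bzero(&fmt, sizeof (fmt));
	 *	fmt.dtfd_format = ndx;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
	 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
	 *	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == -1)
	 *		err(1, "DTRACEIOC_FORMAT");
	 *
	 * where "ndx" is the 1-based format index.
	 */
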
	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
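
/*
 * On FreeBSD (the !defined(sun) branch below), the driver is not set up
 * through _init()/_info()/_fini(); instead the module is registered via
 * DEV_MODULE() and the load/unload work is driven by SYSINIT()/SYSUNINIT(),
 * with most of that code pulled in from the dtrace_*.c fragments included
 * below.
 */
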
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif