/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
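/*
 * Putting the rules above together, a code path that (hypothetically) needed
 * every one of these locks would acquire them in the following order -- a
 * sketch for illustration only, not a path taken verbatim in this file:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 *
 * with the corresponding mutex_exit() calls made in the opposite order.
 */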
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
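/*
 * A worked example of the range check above, for illustration only: with
 * baseaddr = 0x1000 and basesz = 0x100 (the range [0x1000, 0x1100)),
 * testaddr = 0x10f0 with testsz = 0x10 satisfies all three clauses, while
 * testsz = 0x20 fails the second clause (0x10f0 + 0x20 - 0x1000 = 0x110,
 * which exceeds 0x100).  Because the arithmetic is unsigned, a testaddr
 * below baseaddr wraps to a huge difference and fails the first clause
 * rather than underflowing, and the third clause rejects a testaddr/testsz
 * pair whose sum would overflow.
 */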
/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)
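/*
 * A rough sketch of how the pieces above fit together (the fault handling
 * itself lives in the platform trap code, not in this file): a DIF load of
 * a bad address enters dtrace_loadNN(), which sets CPU_DTRACE_NOFAULT around
 * the dereference; the trap handler sees that flag, records the offending
 * address in cpuc_dtrace_illval and sets the appropriate fault flag
 * (CPU_DTRACE_BADADDR here, or CPU_DTRACE_BADALIGN via DTRACE_ALIGNCHECK);
 * the load then returns 0, and the accumulated flags are eventually mapped
 * to a consumer-visible fault code by DTRACE_FLAGS2FLT() --
 * DTRACEFLT_BADADDR in this example.
 */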
#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}
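/*
 * For reference, the probe-context safe ASSERT mentioned above is defined
 * (on DEBUG kernels) in <sys/dtrace_impl.h> roughly as follows -- an
 * illustrative sketch, not a redefinition made here:
 *
 *	#define	ASSERT(EX)	((void)((EX) || \
 *				    dtrace_assfail(#EX, __FILE__, __LINE__)))
 */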
/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}
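/*
 * A typical use of the two checks above, sketched for illustration (the
 * actual callers are the DIF_SUBR_* implementations later in this file):
 * before dereferencing a DIF-supplied pointer, a subroutine does something
 * along the lines of
 *
 *	if (!dtrace_canload(addr, size, mstate, vstate))
 *		break;
 *
 * relying on dtrace_canload() having already set the fault flag and
 * cpuc_dtrace_illval on failure; a subroutine that walks a DIF-supplied
 * string uses dtrace_strcanload() with the enabling's string size so that
 * dtrace_strlen() is never run over unreadable memory.
 */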
/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
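/*
 * As a concrete check of the identity above (for illustration): multiplying
 * factor1 = 2^32 + 3 by factor2 = 2^32 + 5 gives hi1 = 1, lo1 = 3, hi2 = 1,
 * lo2 = 5.  The product starts out as { lo1 * lo2 = 15, hi1 * hi2 = 1 }, and
 * the two cross terms (5 and 3) are each shifted left by 32 bits and added
 * in, yielding the 128-bit result 2^64 + 8 * 2^32 + 15 -- exactly
 * (2^32 + 3) * (2^32 + 5).
 */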
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
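/*
 * To summarize the lifecycle implemented above and in dtrace_dynvar() below
 * (a sketch; <sys/dtrace_impl.h> has the authoritative description): chunks
 * freed in probe context are CAS'd onto a per-CPU dirty list; this routine
 * periodically moves each dirty list aside as the rinsing list, issues a
 * dtrace_sync() so that no CPU can still be traversing a hash chain that
 * referenced those chunks, and only then publishes the list as the clean
 * list, from which dtrace_dynvar() refills its per-CPU free list.
 */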
/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}
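	/*
	 * Illustrative note on the locking just performed: the low bit of
	 * dtdh_lock acts as a per-bucket spin lock for deallocations (an odd
	 * value means a dealloc holds the bucket), while the full word also
	 * serves as a generation count -- the lock-free lookups below
	 * snapshot the word and reread the chain if it has changed
	 * underneath them.
	 */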
1534 */ 1535 dvar = NULL; 1536 break; 1537 } 1538 1539 goto next; 1540 } 1541 1542 if (dtuple->dtt_nkeys != nkeys) 1543 goto next; 1544 1545 for (i = 0; i < nkeys; i++, dkey++) { 1546 if (dkey->dttk_size != key[i].dttk_size) 1547 goto next; /* size or type mismatch */ 1548 1549 if (dkey->dttk_size != 0) { 1550 if (dtrace_bcmp( 1551 (void *)(uintptr_t)key[i].dttk_value, 1552 (void *)(uintptr_t)dkey->dttk_value, 1553 dkey->dttk_size)) 1554 goto next; 1555 } else { 1556 if (dkey->dttk_value != key[i].dttk_value) 1557 goto next; 1558 } 1559 } 1560 1561 if (op != DTRACE_DYNVAR_DEALLOC) 1562 return (dvar); 1563 1564 ASSERT(dvar->dtdv_next == NULL || 1565 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1566 1567 if (prev != NULL) { 1568 ASSERT(hash[bucket].dtdh_chain != dvar); 1569 ASSERT(start != dvar); 1570 ASSERT(prev->dtdv_next == dvar); 1571 prev->dtdv_next = dvar->dtdv_next; 1572 } else { 1573 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1574 start, dvar->dtdv_next) != start) { 1575 /* 1576 * We have failed to atomically swing the 1577 * hash table head pointer, presumably because 1578 * of a conflicting allocation on another CPU. 1579 * We need to reread the hash chain and try 1580 * again. 1581 */ 1582 goto top; 1583 } 1584 } 1585 1586 dtrace_membar_producer(); 1587 1588 /* 1589 * Now set the hash value to indicate that it's free. 1590 */ 1591 ASSERT(hash[bucket].dtdh_chain != dvar); 1592 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1593 1594 dtrace_membar_producer(); 1595 1596 /* 1597 * Set the next pointer to point at the dirty list, and 1598 * atomically swing the dirty pointer to the newly freed dvar. 1599 */ 1600 do { 1601 next = dcpu->dtdsc_dirty; 1602 dvar->dtdv_next = next; 1603 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1604 1605 /* 1606 * Finally, unlock this hash bucket. 1607 */ 1608 ASSERT(hash[bucket].dtdh_lock == lock); 1609 ASSERT(lock & 1); 1610 hash[bucket].dtdh_lock++; 1611 1612 return (NULL); 1613 next: 1614 prev = dvar; 1615 continue; 1616 } 1617 1618 if (dvar == NULL) { 1619 /* 1620 * If dvar is NULL, it is because we went off the rails: 1621 * one of the elements that we traversed in the hash chain 1622 * was deleted while we were traversing it. In this case, 1623 * we assert that we aren't doing a dealloc (deallocs lock 1624 * the hash bucket to prevent themselves from racing with 1625 * one another), and retry the hash chain traversal. 1626 */ 1627 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1628 goto top; 1629 } 1630 1631 if (op != DTRACE_DYNVAR_ALLOC) { 1632 /* 1633 * If we are not to allocate a new variable, we want to 1634 * return NULL now. Before we return, check that the value 1635 * of the lock word hasn't changed. If it has, we may have 1636 * seen an inconsistent snapshot. 1637 */ 1638 if (op == DTRACE_DYNVAR_NOALLOC) { 1639 if (hash[bucket].dtdh_lock != lock) 1640 goto top; 1641 } else { 1642 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1643 ASSERT(hash[bucket].dtdh_lock == lock); 1644 ASSERT(lock & 1); 1645 hash[bucket].dtdh_lock++; 1646 } 1647 1648 return (NULL); 1649 } 1650 1651 /* 1652 * We need to allocate a new dynamic variable. The size we need is the 1653 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1654 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1655 * the size of any referred-to data (dsize). We then round the final 1656 * size up to the chunksize for allocation. 
1657 */ 1658 for (ksize = 0, i = 0; i < nkeys; i++) 1659 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1660 1661 /* 1662 * This should be pretty much impossible, but could happen if, say, 1663 * strange DIF specified the tuple. Ideally, this should be an 1664 * assertion and not an error condition -- but that requires that the 1665 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1666 * bullet-proof. (That is, it must not be able to be fooled by 1667 * malicious DIF.) Given the lack of backwards branches in DIF, 1668 * solving this would presumably not amount to solving the Halting 1669 * Problem -- but it still seems awfully hard. 1670 */ 1671 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1672 ksize + dsize > chunksize) { 1673 dcpu->dtdsc_drops++; 1674 return (NULL); 1675 } 1676 1677 nstate = DTRACE_DSTATE_EMPTY; 1678 1679 do { 1680 retry: 1681 free = dcpu->dtdsc_free; 1682 1683 if (free == NULL) { 1684 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1685 void *rval; 1686 1687 if (clean == NULL) { 1688 /* 1689 * We're out of dynamic variable space on 1690 * this CPU. Unless we have tried all CPUs, 1691 * we'll try to allocate from a different 1692 * CPU. 1693 */ 1694 switch (dstate->dtds_state) { 1695 case DTRACE_DSTATE_CLEAN: { 1696 void *sp = &dstate->dtds_state; 1697 1698 if (++cpu >= NCPU) 1699 cpu = 0; 1700 1701 if (dcpu->dtdsc_dirty != NULL && 1702 nstate == DTRACE_DSTATE_EMPTY) 1703 nstate = DTRACE_DSTATE_DIRTY; 1704 1705 if (dcpu->dtdsc_rinsing != NULL) 1706 nstate = DTRACE_DSTATE_RINSING; 1707 1708 dcpu = &dstate->dtds_percpu[cpu]; 1709 1710 if (cpu != me) 1711 goto retry; 1712 1713 (void) dtrace_cas32(sp, 1714 DTRACE_DSTATE_CLEAN, nstate); 1715 1716 /* 1717 * To increment the correct bean 1718 * counter, take another lap. 1719 */ 1720 goto retry; 1721 } 1722 1723 case DTRACE_DSTATE_DIRTY: 1724 dcpu->dtdsc_dirty_drops++; 1725 break; 1726 1727 case DTRACE_DSTATE_RINSING: 1728 dcpu->dtdsc_rinsing_drops++; 1729 break; 1730 1731 case DTRACE_DSTATE_EMPTY: 1732 dcpu->dtdsc_drops++; 1733 break; 1734 } 1735 1736 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1737 return (NULL); 1738 } 1739 1740 /* 1741 * The clean list appears to be non-empty. We want to 1742 * move the clean list to the free list; we start by 1743 * moving the clean pointer aside. 1744 */ 1745 if (dtrace_casptr(&dcpu->dtdsc_clean, 1746 clean, NULL) != clean) { 1747 /* 1748 * We are in one of two situations: 1749 * 1750 * (a) The clean list was switched to the 1751 * free list by another CPU. 1752 * 1753 * (b) The clean list was added to by the 1754 * cleansing cyclic. 1755 * 1756 * In either of these situations, we can 1757 * just reattempt the free list allocation. 1758 */ 1759 goto retry; 1760 } 1761 1762 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1763 1764 /* 1765 * Now we'll move the clean list to the free list. 1766 * It's impossible for this to fail: the only way 1767 * the free list can be updated is through this 1768 * code path, and only one CPU can own the clean list. 1769 * Thus, it would only be possible for this to fail if 1770 * this code were racing with dtrace_dynvar_clean(). 1771 * (That is, if dtrace_dynvar_clean() updated the clean 1772 * list, and we ended up racing to update the free 1773 * list.) This race is prevented by the dtrace_sync() 1774 * in dtrace_dynvar_clean() -- which flushes the 1775 * owners of the clean lists out before resetting 1776 * the clean lists. 
1777 */ 1778 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1779 ASSERT(rval == NULL); 1780 goto retry; 1781 } 1782 1783 dvar = free; 1784 new_free = dvar->dtdv_next; 1785 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1786 1787 /* 1788 * We have now allocated a new chunk. We copy the tuple keys into the 1789 * tuple array and copy any referenced key data into the data space 1790 * following the tuple array. As we do this, we relocate dttk_value 1791 * in the final tuple to point to the key data address in the chunk. 1792 */ 1793 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1794 dvar->dtdv_data = (void *)(kdata + ksize); 1795 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1796 1797 for (i = 0; i < nkeys; i++) { 1798 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1799 size_t kesize = key[i].dttk_size; 1800 1801 if (kesize != 0) { 1802 dtrace_bcopy( 1803 (const void *)(uintptr_t)key[i].dttk_value, 1804 (void *)kdata, kesize); 1805 dkey->dttk_value = kdata; 1806 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1807 } else { 1808 dkey->dttk_value = key[i].dttk_value; 1809 } 1810 1811 dkey->dttk_size = kesize; 1812 } 1813 1814 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1815 dvar->dtdv_hashval = hashval; 1816 dvar->dtdv_next = start; 1817 1818 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1819 return (dvar); 1820 1821 /* 1822 * The cas has failed. Either another CPU is adding an element to 1823 * this hash chain, or another CPU is deleting an element from this 1824 * hash chain. The simplest way to deal with both of these cases 1825 * (though not necessarily the most efficient) is to free our 1826 * allocated block and tail-call ourselves. Note that the free is 1827 * to the dirty list and _not_ to the free list. This is to prevent 1828 * races with allocators, above. 
1829 */ 1830 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1831 1832 dtrace_membar_producer(); 1833 1834 do { 1835 free = dcpu->dtdsc_dirty; 1836 dvar->dtdv_next = free; 1837 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1838 1839 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1840 } 1841 1842 /*ARGSUSED*/ 1843 static void 1844 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1845 { 1846 if ((int64_t)nval < (int64_t)*oval) 1847 *oval = nval; 1848 } 1849 1850 /*ARGSUSED*/ 1851 static void 1852 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1853 { 1854 if ((int64_t)nval > (int64_t)*oval) 1855 *oval = nval; 1856 } 1857 1858 static void 1859 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1860 { 1861 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1862 int64_t val = (int64_t)nval; 1863 1864 if (val < 0) { 1865 for (i = 0; i < zero; i++) { 1866 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1867 quanta[i] += incr; 1868 return; 1869 } 1870 } 1871 } else { 1872 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1873 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1874 quanta[i - 1] += incr; 1875 return; 1876 } 1877 } 1878 1879 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1880 return; 1881 } 1882 1883 ASSERT(0); 1884 } 1885 1886 static void 1887 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1888 { 1889 uint64_t arg = *lquanta++; 1890 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1891 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1892 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1893 int32_t val = (int32_t)nval, level; 1894 1895 ASSERT(step != 0); 1896 ASSERT(levels != 0); 1897 1898 if (val < base) { 1899 /* 1900 * This is an underflow. 1901 */ 1902 lquanta[0] += incr; 1903 return; 1904 } 1905 1906 level = (val - base) / step; 1907 1908 if (level < levels) { 1909 lquanta[level + 1] += incr; 1910 return; 1911 } 1912 1913 /* 1914 * This is an overflow. 1915 */ 1916 lquanta[levels + 1] += incr; 1917 } 1918 1919 static int 1920 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1921 uint16_t high, uint16_t nsteps, int64_t value) 1922 { 1923 int64_t this = 1, last, next; 1924 int base = 1, order; 1925 1926 ASSERT(factor <= nsteps); 1927 ASSERT(nsteps % factor == 0); 1928 1929 for (order = 0; order < low; order++) 1930 this *= factor; 1931 1932 /* 1933 * If our value is less than our factor taken to the power of the 1934 * low order of magnitude, it goes into the zeroth bucket. 1935 */ 1936 if (value < (last = this)) 1937 return (0); 1938 1939 for (this *= factor; order <= high; order++) { 1940 int nbuckets = this > nsteps ? nsteps : this; 1941 1942 if ((next = this * factor) < this) { 1943 /* 1944 * We should not generally get log/linear quantizations 1945 * with a high magnitude that allows 64-bits to 1946 * overflow, but we nonetheless protect against this 1947 * by explicitly checking for overflow, and clamping 1948 * our value accordingly. 1949 */ 1950 value = this - 1; 1951 } 1952 1953 if (value < this) { 1954 /* 1955 * If our value lies within this order of magnitude, 1956 * determine its position by taking the offset within 1957 * the order of magnitude, dividing by the bucket 1958 * width, and adding to our (accumulated) base. 
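 * As a worked example (parameters chosen purely for illustration): with a
 * factor of 10, a low of 0, a high of 2, 10 steps and a value of 42, base
 * starts at 1 (bucket 0 is reserved for underflow), is advanced by 9 for
 * the [1, 10) magnitude, and the value is then found to lie in [10, 100);
 * the result is therefore 10 + (42 - 10) / (100 / 10) = bucket 13.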
1959 */ 1960 return (base + (value - last) / (this / nbuckets)); 1961 } 1962 1963 base += nbuckets - (nbuckets / factor); 1964 last = this; 1965 this = next; 1966 } 1967 1968 /* 1969 * Our value is greater than or equal to our factor taken to the 1970 * power of one plus the high magnitude -- return the top bucket. 1971 */ 1972 return (base); 1973 } 1974 1975 static void 1976 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1977 { 1978 uint64_t arg = *llquanta++; 1979 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1980 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1981 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1982 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1983 1984 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1985 low, high, nsteps, nval)] += incr; 1986 } 1987 1988 /*ARGSUSED*/ 1989 static void 1990 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1991 { 1992 data[0]++; 1993 data[1] += nval; 1994 } 1995 1996 /*ARGSUSED*/ 1997 static void 1998 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1999 { 2000 int64_t snval = (int64_t)nval; 2001 uint64_t tmp[2]; 2002 2003 data[0]++; 2004 data[1] += nval; 2005 2006 /* 2007 * What we want to say here is: 2008 * 2009 * data[2] += nval * nval; 2010 * 2011 * But given that nval is 64-bit, we could easily overflow, so 2012 * we do this as 128-bit arithmetic. 2013 */ 2014 if (snval < 0) 2015 snval = -snval; 2016 2017 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2018 dtrace_add_128(data + 2, tmp, data + 2); 2019 } 2020 2021 /*ARGSUSED*/ 2022 static void 2023 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2024 { 2025 *oval = *oval + 1; 2026 } 2027 2028 /*ARGSUSED*/ 2029 static void 2030 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2031 { 2032 *oval += nval; 2033 } 2034 2035 /* 2036 * Aggregate given the tuple in the principal data buffer, and the aggregating 2037 * action denoted by the specified dtrace_aggregation_t. The aggregation 2038 * buffer is specified as the buf parameter. This routine does not return 2039 * failure; if there is no space in the aggregation buffer, the data will be 2040 * dropped, and a corresponding counter incremented. 2041 */ 2042 static void 2043 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2044 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2045 { 2046 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2047 uint32_t i, ndx, size, fsize; 2048 uint32_t align = sizeof (uint64_t) - 1; 2049 dtrace_aggbuffer_t *agb; 2050 dtrace_aggkey_t *key; 2051 uint32_t hashval = 0, limit, isstr; 2052 caddr_t tomax, data, kdata; 2053 dtrace_actkind_t action; 2054 dtrace_action_t *act; 2055 uintptr_t offs; 2056 2057 if (buf == NULL) 2058 return; 2059 2060 if (!agg->dtag_hasarg) { 2061 /* 2062 * Currently, only quantize() and lquantize() take additional 2063 * arguments, and they have the same semantics: an increment 2064 * value that defaults to 1 when not present. If additional 2065 * aggregating actions take arguments, the setting of the 2066 * default argument value will presumably have to become more 2067 * sophisticated... 
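 * (As a usage illustration: an enabling like @sz[execname] =
 * quantize(arg0, arg0) supplies arg0 as the increment, while a plain
 * quantize(arg0) has no such argument and takes the default of 1 assigned
 * just below.)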
2068 */ 2069 arg = 1; 2070 } 2071 2072 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2073 size = rec->dtrd_offset - agg->dtag_base; 2074 fsize = size + rec->dtrd_size; 2075 2076 ASSERT(dbuf->dtb_tomax != NULL); 2077 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2078 2079 if ((tomax = buf->dtb_tomax) == NULL) { 2080 dtrace_buffer_drop(buf); 2081 return; 2082 } 2083 2084 /* 2085 * The metastructure is always at the bottom of the buffer. 2086 */ 2087 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2088 sizeof (dtrace_aggbuffer_t)); 2089 2090 if (buf->dtb_offset == 0) { 2091 /* 2092 * We just kludge up approximately 1/8th of the size to be 2093 * buckets. If this guess ends up being routinely 2094 * off-the-mark, we may need to dynamically readjust this 2095 * based on past performance. 2096 */ 2097 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2098 2099 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2100 (uintptr_t)tomax || hashsize == 0) { 2101 /* 2102 * We've been given a ludicrously small buffer; 2103 * increment our drop count and leave. 2104 */ 2105 dtrace_buffer_drop(buf); 2106 return; 2107 } 2108 2109 /* 2110 * And now, a pathetic attempt to try to get an odd (or 2111 * perchance, a prime) hash size for better hash distribution. 2112 */ 2113 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2114 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2115 2116 agb->dtagb_hashsize = hashsize; 2117 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2118 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2119 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2120 2121 for (i = 0; i < agb->dtagb_hashsize; i++) 2122 agb->dtagb_hash[i] = NULL; 2123 } 2124 2125 ASSERT(agg->dtag_first != NULL); 2126 ASSERT(agg->dtag_first->dta_intuple); 2127 2128 /* 2129 * Calculate the hash value based on the key. Note that we _don't_ 2130 * include the aggid in the hashing (but we will store it as part of 2131 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2132 * algorithm: a simple, quick algorithm that has no known funnels, and 2133 * gets good distribution in practice. The efficacy of the hashing 2134 * algorithm (and a comparison with other algorithms) may be found by 2135 * running the ::dtrace_aggstat MDB dcmd. 2136 */ 2137 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2138 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2139 limit = i + act->dta_rec.dtrd_size; 2140 ASSERT(limit <= size); 2141 isstr = DTRACEACT_ISSTRING(act); 2142 2143 for (; i < limit; i++) { 2144 hashval += data[i]; 2145 hashval += (hashval << 10); 2146 hashval ^= (hashval >> 6); 2147 2148 if (isstr && data[i] == '\0') 2149 break; 2150 } 2151 } 2152 2153 hashval += (hashval << 3); 2154 hashval ^= (hashval >> 11); 2155 hashval += (hashval << 15); 2156 2157 /* 2158 * Yes, the divide here is expensive -- but it's generally the least 2159 * of the performance issues given the amount of data that we iterate 2160 * over to compute hash values, compare data, etc.
2161 */ 2162 ndx = hashval % agb->dtagb_hashsize; 2163 2164 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2165 ASSERT((caddr_t)key >= tomax); 2166 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2167 2168 if (hashval != key->dtak_hashval || key->dtak_size != size) 2169 continue; 2170 2171 kdata = key->dtak_data; 2172 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2173 2174 for (act = agg->dtag_first; act->dta_intuple; 2175 act = act->dta_next) { 2176 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2177 limit = i + act->dta_rec.dtrd_size; 2178 ASSERT(limit <= size); 2179 isstr = DTRACEACT_ISSTRING(act); 2180 2181 for (; i < limit; i++) { 2182 if (kdata[i] != data[i]) 2183 goto next; 2184 2185 if (isstr && data[i] == '\0') 2186 break; 2187 } 2188 } 2189 2190 if (action != key->dtak_action) { 2191 /* 2192 * We are aggregating on the same value in the same 2193 * aggregation with two different aggregating actions. 2194 * (This should have been picked up in the compiler, 2195 * so we may be dealing with errant or devious DIF.) 2196 * This is an error condition; we indicate as much, 2197 * and return. 2198 */ 2199 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2200 return; 2201 } 2202 2203 /* 2204 * This is a hit: we need to apply the aggregator to 2205 * the value at this key. 2206 */ 2207 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2208 return; 2209 next: 2210 continue; 2211 } 2212 2213 /* 2214 * We didn't find it. We need to allocate some zero-filled space, 2215 * link it into the hash table appropriately, and apply the aggregator 2216 * to the (zero-filled) value. 2217 */ 2218 offs = buf->dtb_offset; 2219 while (offs & (align - 1)) 2220 offs += sizeof (uint32_t); 2221 2222 /* 2223 * If we don't have enough room to both allocate a new key _and_ 2224 * its associated data, increment the drop count and return. 2225 */ 2226 if ((uintptr_t)tomax + offs + fsize > 2227 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2228 dtrace_buffer_drop(buf); 2229 return; 2230 } 2231 2232 /*CONSTCOND*/ 2233 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2234 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2235 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2236 2237 key->dtak_data = kdata = tomax + offs; 2238 buf->dtb_offset = offs + fsize; 2239 2240 /* 2241 * Now copy the data across. 2242 */ 2243 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2244 2245 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2246 kdata[i] = data[i]; 2247 2248 /* 2249 * Because strings are not zeroed out by default, we need to iterate 2250 * looking for actions that store strings, and we need to explicitly 2251 * pad these strings out with zeroes. 2252 */ 2253 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2254 int nul; 2255 2256 if (!DTRACEACT_ISSTRING(act)) 2257 continue; 2258 2259 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2260 limit = i + act->dta_rec.dtrd_size; 2261 ASSERT(limit <= size); 2262 2263 for (nul = 0; i < limit; i++) { 2264 if (nul) { 2265 kdata[i] = '\0'; 2266 continue; 2267 } 2268 2269 if (data[i] != '\0') 2270 continue; 2271 2272 nul = 1; 2273 } 2274 } 2275 2276 for (i = size; i < fsize; i++) 2277 kdata[i] = 0; 2278 2279 key->dtak_hashval = hashval; 2280 key->dtak_size = size; 2281 key->dtak_action = action; 2282 key->dtak_next = agb->dtagb_hash[ndx]; 2283 agb->dtagb_hash[ndx] = key; 2284 2285 /* 2286 * Finally, apply the aggregator. 
2287 */ 2288 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2289 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2290 } 2291 2292 /* 2293 * Given consumer state, this routine finds a speculation in the INACTIVE 2294 * state and transitions it into the ACTIVE state. If there is no speculation 2295 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2296 * incremented -- it is up to the caller to take appropriate action. 2297 */ 2298 static int 2299 dtrace_speculation(dtrace_state_t *state) 2300 { 2301 int i = 0; 2302 dtrace_speculation_state_t current; 2303 uint32_t *stat = &state->dts_speculations_unavail, count; 2304 2305 while (i < state->dts_nspeculations) { 2306 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2307 2308 current = spec->dtsp_state; 2309 2310 if (current != DTRACESPEC_INACTIVE) { 2311 if (current == DTRACESPEC_COMMITTINGMANY || 2312 current == DTRACESPEC_COMMITTING || 2313 current == DTRACESPEC_DISCARDING) 2314 stat = &state->dts_speculations_busy; 2315 i++; 2316 continue; 2317 } 2318 2319 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2320 current, DTRACESPEC_ACTIVE) == current) 2321 return (i + 1); 2322 } 2323 2324 /* 2325 * We couldn't find a speculation. If we found as much as a single 2326 * busy speculation buffer, we'll attribute this failure as "busy" 2327 * instead of "unavail". 2328 */ 2329 do { 2330 count = *stat; 2331 } while (dtrace_cas32(stat, count, count + 1) != count); 2332 2333 return (0); 2334 } 2335 2336 /* 2337 * This routine commits an active speculation. If the specified speculation 2338 * is not in a valid state to perform a commit(), this routine will silently do 2339 * nothing. The state of the specified speculation is transitioned according 2340 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2341 */ 2342 static void 2343 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2344 dtrace_specid_t which) 2345 { 2346 dtrace_speculation_t *spec; 2347 dtrace_buffer_t *src, *dest; 2348 uintptr_t daddr, saddr, dlimit; 2349 dtrace_speculation_state_t current, new = 0; 2350 intptr_t offs; 2351 2352 if (which == 0) 2353 return; 2354 2355 if (which > state->dts_nspeculations) { 2356 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2357 return; 2358 } 2359 2360 spec = &state->dts_speculations[which - 1]; 2361 src = &spec->dtsp_buffer[cpu]; 2362 dest = &state->dts_buffer[cpu]; 2363 2364 do { 2365 current = spec->dtsp_state; 2366 2367 if (current == DTRACESPEC_COMMITTINGMANY) 2368 break; 2369 2370 switch (current) { 2371 case DTRACESPEC_INACTIVE: 2372 case DTRACESPEC_DISCARDING: 2373 return; 2374 2375 case DTRACESPEC_COMMITTING: 2376 /* 2377 * This is only possible if we are (a) commit()'ing 2378 * without having done a prior speculate() on this CPU 2379 * and (b) racing with another commit() on a different 2380 * CPU. There's nothing to do -- we just assert that 2381 * our offset is 0. 2382 */ 2383 ASSERT(src->dtb_offset == 0); 2384 return; 2385 2386 case DTRACESPEC_ACTIVE: 2387 new = DTRACESPEC_COMMITTING; 2388 break; 2389 2390 case DTRACESPEC_ACTIVEONE: 2391 /* 2392 * This speculation is active on one CPU. If our 2393 * buffer offset is non-zero, we know that the one CPU 2394 * must be us. Otherwise, we are committing on a 2395 * different CPU from the speculate(), and we must 2396 * rely on being asynchronously cleaned. 
2397 */ 2398 if (src->dtb_offset != 0) { 2399 new = DTRACESPEC_COMMITTING; 2400 break; 2401 } 2402 /*FALLTHROUGH*/ 2403 2404 case DTRACESPEC_ACTIVEMANY: 2405 new = DTRACESPEC_COMMITTINGMANY; 2406 break; 2407 2408 default: 2409 ASSERT(0); 2410 } 2411 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2412 current, new) != current); 2413 2414 /* 2415 * We have set the state to indicate that we are committing this 2416 * speculation. Now reserve the necessary space in the destination 2417 * buffer. 2418 */ 2419 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2420 sizeof (uint64_t), state, NULL)) < 0) { 2421 dtrace_buffer_drop(dest); 2422 goto out; 2423 } 2424 2425 /* 2426 * We have the space; copy the buffer across. (Note that this is a 2427 * highly suboptimal bcopy(); in the unlikely event that this becomes 2428 * a serious performance issue, a high-performance DTrace-specific 2429 * bcopy() should obviously be invented.) 2430 */ 2431 daddr = (uintptr_t)dest->dtb_tomax + offs; 2432 dlimit = daddr + src->dtb_offset; 2433 saddr = (uintptr_t)src->dtb_tomax; 2434 2435 /* 2436 * First, the aligned portion. 2437 */ 2438 while (dlimit - daddr >= sizeof (uint64_t)) { 2439 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2440 2441 daddr += sizeof (uint64_t); 2442 saddr += sizeof (uint64_t); 2443 } 2444 2445 /* 2446 * Now any left-over bit... 2447 */ 2448 while (dlimit - daddr) 2449 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2450 2451 /* 2452 * Finally, commit the reserved space in the destination buffer. 2453 */ 2454 dest->dtb_offset = offs + src->dtb_offset; 2455 2456 out: 2457 /* 2458 * If we're lucky enough to be the only active CPU on this speculation 2459 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2460 */ 2461 if (current == DTRACESPEC_ACTIVE || 2462 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2463 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2464 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2465 2466 ASSERT(rval == DTRACESPEC_COMMITTING); 2467 } 2468 2469 src->dtb_offset = 0; 2470 src->dtb_xamot_drops += src->dtb_drops; 2471 src->dtb_drops = 0; 2472 } 2473 2474 /* 2475 * This routine discards an active speculation. If the specified speculation 2476 * is not in a valid state to perform a discard(), this routine will silently 2477 * do nothing.
The state of the specified speculation is transitioned 2478 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2479 */ 2480 static void 2481 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2482 dtrace_specid_t which) 2483 { 2484 dtrace_speculation_t *spec; 2485 dtrace_speculation_state_t current, new = 0; 2486 dtrace_buffer_t *buf; 2487 2488 if (which == 0) 2489 return; 2490 2491 if (which > state->dts_nspeculations) { 2492 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2493 return; 2494 } 2495 2496 spec = &state->dts_speculations[which - 1]; 2497 buf = &spec->dtsp_buffer[cpu]; 2498 2499 do { 2500 current = spec->dtsp_state; 2501 2502 switch (current) { 2503 case DTRACESPEC_INACTIVE: 2504 case DTRACESPEC_COMMITTINGMANY: 2505 case DTRACESPEC_COMMITTING: 2506 case DTRACESPEC_DISCARDING: 2507 return; 2508 2509 case DTRACESPEC_ACTIVE: 2510 case DTRACESPEC_ACTIVEMANY: 2511 new = DTRACESPEC_DISCARDING; 2512 break; 2513 2514 case DTRACESPEC_ACTIVEONE: 2515 if (buf->dtb_offset != 0) { 2516 new = DTRACESPEC_INACTIVE; 2517 } else { 2518 new = DTRACESPEC_DISCARDING; 2519 } 2520 break; 2521 2522 default: 2523 ASSERT(0); 2524 } 2525 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2526 current, new) != current); 2527 2528 buf->dtb_offset = 0; 2529 buf->dtb_drops = 0; 2530 } 2531 2532 /* 2533 * Note: not called from probe context. This function is called 2534 * asynchronously from cross call context to clean any speculations that are 2535 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2536 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2537 * speculation. 2538 */ 2539 static void 2540 dtrace_speculation_clean_here(dtrace_state_t *state) 2541 { 2542 dtrace_icookie_t cookie; 2543 processorid_t cpu = curcpu; 2544 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2545 dtrace_specid_t i; 2546 2547 cookie = dtrace_interrupt_disable(); 2548 2549 if (dest->dtb_tomax == NULL) { 2550 dtrace_interrupt_enable(cookie); 2551 return; 2552 } 2553 2554 for (i = 0; i < state->dts_nspeculations; i++) { 2555 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2556 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2557 2558 if (src->dtb_tomax == NULL) 2559 continue; 2560 2561 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2562 src->dtb_offset = 0; 2563 continue; 2564 } 2565 2566 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2567 continue; 2568 2569 if (src->dtb_offset == 0) 2570 continue; 2571 2572 dtrace_speculation_commit(state, cpu, i + 1); 2573 } 2574 2575 dtrace_interrupt_enable(cookie); 2576 } 2577 2578 /* 2579 * Note: not called from probe context. This function is called 2580 * asynchronously (and at a regular interval) to clean any speculations that 2581 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2582 * is work to be done, it cross calls all CPUs to perform that work; 2583 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2584 * INACTIVE state until they have been cleaned by all CPUs. 
2585 */ 2586 static void 2587 dtrace_speculation_clean(dtrace_state_t *state) 2588 { 2589 int work = 0, rv; 2590 dtrace_specid_t i; 2591 2592 for (i = 0; i < state->dts_nspeculations; i++) { 2593 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2594 2595 ASSERT(!spec->dtsp_cleaning); 2596 2597 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2598 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2599 continue; 2600 2601 work++; 2602 spec->dtsp_cleaning = 1; 2603 } 2604 2605 if (!work) 2606 return; 2607 2608 dtrace_xcall(DTRACE_CPUALL, 2609 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2610 2611 /* 2612 * We now know that all CPUs have committed or discarded their 2613 * speculation buffers, as appropriate. We can now set the state 2614 * to inactive. 2615 */ 2616 for (i = 0; i < state->dts_nspeculations; i++) { 2617 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2618 dtrace_speculation_state_t current, new; 2619 2620 if (!spec->dtsp_cleaning) 2621 continue; 2622 2623 current = spec->dtsp_state; 2624 ASSERT(current == DTRACESPEC_DISCARDING || 2625 current == DTRACESPEC_COMMITTINGMANY); 2626 2627 new = DTRACESPEC_INACTIVE; 2628 2629 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2630 ASSERT(rv == current); 2631 spec->dtsp_cleaning = 0; 2632 } 2633 } 2634 2635 /* 2636 * Called as part of a speculate() to get the speculative buffer associated 2637 * with a given speculation. Returns NULL if the specified speculation is not 2638 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2639 * the active CPU is not the specified CPU -- the speculation will be 2640 * atomically transitioned into the ACTIVEMANY state. 2641 */ 2642 static dtrace_buffer_t * 2643 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2644 dtrace_specid_t which) 2645 { 2646 dtrace_speculation_t *spec; 2647 dtrace_speculation_state_t current, new = 0; 2648 dtrace_buffer_t *buf; 2649 2650 if (which == 0) 2651 return (NULL); 2652 2653 if (which > state->dts_nspeculations) { 2654 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2655 return (NULL); 2656 } 2657 2658 spec = &state->dts_speculations[which - 1]; 2659 buf = &spec->dtsp_buffer[cpuid]; 2660 2661 do { 2662 current = spec->dtsp_state; 2663 2664 switch (current) { 2665 case DTRACESPEC_INACTIVE: 2666 case DTRACESPEC_COMMITTINGMANY: 2667 case DTRACESPEC_DISCARDING: 2668 return (NULL); 2669 2670 case DTRACESPEC_COMMITTING: 2671 ASSERT(buf->dtb_offset == 0); 2672 return (NULL); 2673 2674 case DTRACESPEC_ACTIVEONE: 2675 /* 2676 * This speculation is currently active on one CPU. 2677 * Check the offset in the buffer; if it's non-zero, 2678 * that CPU must be us (and we leave the state alone). 2679 * If it's zero, assume that we're starting on a new 2680 * CPU -- and change the state to indicate that the 2681 * speculation is active on more than one CPU. 2682 */ 2683 if (buf->dtb_offset != 0) 2684 return (buf); 2685 2686 new = DTRACESPEC_ACTIVEMANY; 2687 break; 2688 2689 case DTRACESPEC_ACTIVEMANY: 2690 return (buf); 2691 2692 case DTRACESPEC_ACTIVE: 2693 new = DTRACESPEC_ACTIVEONE; 2694 break; 2695 2696 default: 2697 ASSERT(0); 2698 } 2699 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2700 current, new) != current); 2701 2702 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2703 return (buf); 2704 } 2705 2706 /* 2707 * Return a string. 
In the event that the user lacks the privilege to access 2708 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2709 * don't fail access checking. 2710 * 2711 * dtrace_dif_variable() uses this routine as a helper for various 2712 * builtin values such as 'execname' and 'probefunc.' 2713 */ 2714 uintptr_t 2715 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2716 dtrace_mstate_t *mstate) 2717 { 2718 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2719 uintptr_t ret; 2720 size_t strsz; 2721 2722 /* 2723 * The easy case: this probe is allowed to read all of memory, so 2724 * we can just return this as a vanilla pointer. 2725 */ 2726 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2727 return (addr); 2728 2729 /* 2730 * This is the tougher case: we copy the string in question from 2731 * kernel memory into scratch memory and return it that way: this 2732 * ensures that we won't trip up when access checking tests the 2733 * BYREF return value. 2734 */ 2735 strsz = dtrace_strlen((char *)addr, size) + 1; 2736 2737 if (mstate->dtms_scratch_ptr + strsz > 2738 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2739 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2740 return (0); 2741 } 2742 2743 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2744 strsz); 2745 ret = mstate->dtms_scratch_ptr; 2746 mstate->dtms_scratch_ptr += strsz; 2747 return (ret); 2748 } 2749 2750 /* 2751 * Return a string from a memory address which is known to have one or 2752 * more concatenated, individually zero-terminated, sub-strings. 2753 * In the event that the user lacks the privilege to access 2754 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2755 * don't fail access checking. 2756 * 2757 * dtrace_dif_variable() uses this routine as a helper for various 2758 * builtin values such as 'execargs'. 2759 */ 2760 static uintptr_t 2761 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2762 dtrace_mstate_t *mstate) 2763 { 2764 char *p; 2765 size_t i; 2766 uintptr_t ret; 2767 2768 if (mstate->dtms_scratch_ptr + strsz > 2769 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2770 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2771 return (0); 2772 } 2773 2774 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2775 strsz); 2776 2777 /* Replace sub-string termination characters with a space. */ 2778 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2779 p++, i++) 2780 if (*p == '\0') 2781 *p = ' '; 2782 2783 ret = mstate->dtms_scratch_ptr; 2784 mstate->dtms_scratch_ptr += strsz; 2785 return (ret); 2786 } 2787 2788 /* 2789 * This function implements the DIF emulator's variable lookups. The emulator 2790 * passes a reserved variable identifier and optional built-in array index. 2791 */ 2792 static uint64_t 2793 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2794 uint64_t ndx) 2795 { 2796 /* 2797 * If we're accessing one of the uncached arguments, we'll turn this 2798 * into a reference in the args array.
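 * (For example, a D reference to the built-in variable arg7 arrives here
 * as v == DIF_VAR_ARG7 with no index; it is rewritten below to
 * v == DIF_VAR_ARGS with ndx == 7 -- that is, the equivalent of args[7].)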
2799 */ 2800 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2801 ndx = v - DIF_VAR_ARG0; 2802 v = DIF_VAR_ARGS; 2803 } 2804 2805 switch (v) { 2806 case DIF_VAR_ARGS: 2807 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2808 if (ndx >= sizeof (mstate->dtms_arg) / 2809 sizeof (mstate->dtms_arg[0])) { 2810 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2811 dtrace_provider_t *pv; 2812 uint64_t val; 2813 2814 pv = mstate->dtms_probe->dtpr_provider; 2815 if (pv->dtpv_pops.dtps_getargval != NULL) 2816 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2817 mstate->dtms_probe->dtpr_id, 2818 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2819 else 2820 val = dtrace_getarg(ndx, aframes); 2821 2822 /* 2823 * This is regrettably required to keep the compiler 2824 * from tail-optimizing the call to dtrace_getarg(). 2825 * The condition always evaluates to true, but the 2826 * compiler has no way of figuring that out a priori. 2827 * (None of this would be necessary if the compiler 2828 * could be relied upon to _always_ tail-optimize 2829 * the call to dtrace_getarg() -- but it can't.) 2830 */ 2831 if (mstate->dtms_probe != NULL) 2832 return (val); 2833 2834 ASSERT(0); 2835 } 2836 2837 return (mstate->dtms_arg[ndx]); 2838 2839 #if defined(sun) 2840 case DIF_VAR_UREGS: { 2841 klwp_t *lwp; 2842 2843 if (!dtrace_priv_proc(state)) 2844 return (0); 2845 2846 if ((lwp = curthread->t_lwp) == NULL) { 2847 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2848 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2849 return (0); 2850 } 2851 2852 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2853 return (0); 2854 } 2855 #else 2856 case DIF_VAR_UREGS: { 2857 struct trapframe *tframe; 2858 2859 if (!dtrace_priv_proc(state)) 2860 return (0); 2861 2862 if ((tframe = curthread->td_frame) == NULL) { 2863 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2864 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2865 return (0); 2866 } 2867 2868 return (dtrace_getreg(tframe, ndx)); 2869 } 2870 #endif 2871 2872 case DIF_VAR_CURTHREAD: 2873 if (!dtrace_priv_kernel(state)) 2874 return (0); 2875 return ((uint64_t)(uintptr_t)curthread); 2876 2877 case DIF_VAR_TIMESTAMP: 2878 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2879 mstate->dtms_timestamp = dtrace_gethrtime(); 2880 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2881 } 2882 return (mstate->dtms_timestamp); 2883 2884 case DIF_VAR_VTIMESTAMP: 2885 ASSERT(dtrace_vtime_references != 0); 2886 return (curthread->t_dtrace_vtime); 2887 2888 case DIF_VAR_WALLTIMESTAMP: 2889 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2890 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2891 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2892 } 2893 return (mstate->dtms_walltimestamp); 2894 2895 #if defined(sun) 2896 case DIF_VAR_IPL: 2897 if (!dtrace_priv_kernel(state)) 2898 return (0); 2899 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2900 mstate->dtms_ipl = dtrace_getipl(); 2901 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2902 } 2903 return (mstate->dtms_ipl); 2904 #endif 2905 2906 case DIF_VAR_EPID: 2907 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2908 return (mstate->dtms_epid); 2909 2910 case DIF_VAR_ID: 2911 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2912 return (mstate->dtms_probe->dtpr_id); 2913 2914 case DIF_VAR_STACKDEPTH: 2915 if (!dtrace_priv_kernel(state)) 2916 return (0); 2917 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2918 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2919 2920 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 
2921 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2922 } 2923 return (mstate->dtms_stackdepth); 2924 2925 case DIF_VAR_USTACKDEPTH: 2926 if (!dtrace_priv_proc(state)) 2927 return (0); 2928 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2929 /* 2930 * See comment in DIF_VAR_PID. 2931 */ 2932 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2933 CPU_ON_INTR(CPU)) { 2934 mstate->dtms_ustackdepth = 0; 2935 } else { 2936 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2937 mstate->dtms_ustackdepth = 2938 dtrace_getustackdepth(); 2939 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2940 } 2941 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2942 } 2943 return (mstate->dtms_ustackdepth); 2944 2945 case DIF_VAR_CALLER: 2946 if (!dtrace_priv_kernel(state)) 2947 return (0); 2948 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2949 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2950 2951 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2952 /* 2953 * If this is an unanchored probe, we are 2954 * required to go through the slow path: 2955 * dtrace_caller() only guarantees correct 2956 * results for anchored probes. 2957 */ 2958 pc_t caller[2] = {0, 0}; 2959 2960 dtrace_getpcstack(caller, 2, aframes, 2961 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2962 mstate->dtms_caller = caller[1]; 2963 } else if ((mstate->dtms_caller = 2964 dtrace_caller(aframes)) == -1) { 2965 /* 2966 * We have failed to do this the quick way; 2967 * we must resort to the slower approach of 2968 * calling dtrace_getpcstack(). 2969 */ 2970 pc_t caller = 0; 2971 2972 dtrace_getpcstack(&caller, 1, aframes, NULL); 2973 mstate->dtms_caller = caller; 2974 } 2975 2976 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2977 } 2978 return (mstate->dtms_caller); 2979 2980 case DIF_VAR_UCALLER: 2981 if (!dtrace_priv_proc(state)) 2982 return (0); 2983 2984 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2985 uint64_t ustack[3]; 2986 2987 /* 2988 * dtrace_getupcstack() fills in the first uint64_t 2989 * with the current PID. The second uint64_t will 2990 * be the program counter at user-level. The third 2991 * uint64_t will contain the caller, which is what 2992 * we're after. 2993 */ 2994 ustack[2] = 0; 2995 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2996 dtrace_getupcstack(ustack, 3); 2997 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2998 mstate->dtms_ucaller = ustack[2]; 2999 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3000 } 3001 3002 return (mstate->dtms_ucaller); 3003 3004 case DIF_VAR_PROBEPROV: 3005 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3006 return (dtrace_dif_varstr( 3007 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3008 state, mstate)); 3009 3010 case DIF_VAR_PROBEMOD: 3011 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3012 return (dtrace_dif_varstr( 3013 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3014 state, mstate)); 3015 3016 case DIF_VAR_PROBEFUNC: 3017 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3018 return (dtrace_dif_varstr( 3019 (uintptr_t)mstate->dtms_probe->dtpr_func, 3020 state, mstate)); 3021 3022 case DIF_VAR_PROBENAME: 3023 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3024 return (dtrace_dif_varstr( 3025 (uintptr_t)mstate->dtms_probe->dtpr_name, 3026 state, mstate)); 3027 3028 case DIF_VAR_PID: 3029 if (!dtrace_priv_proc(state)) 3030 return (0); 3031 3032 #if defined(sun) 3033 /* 3034 * Note that we are assuming that an unanchored probe is 3035 * always due to a high-level interrupt. (And we're assuming 3036 * that there is only a single high level interrupt.) 
3037 */ 3038 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3039 return (pid0.pid_id); 3040 3041 /* 3042 * It is always safe to dereference one's own t_procp pointer: 3043 * it always points to a valid, allocated proc structure. 3044 * Further, it is always safe to dereference the p_pidp member 3045 * of one's own proc structure. (These are truisms because 3046 * threads and processes don't clean up their own state -- 3047 * they leave that task to whomever reaps them.) 3048 */ 3049 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3050 #else 3051 return ((uint64_t)curproc->p_pid); 3052 #endif 3053 3054 case DIF_VAR_PPID: 3055 if (!dtrace_priv_proc(state)) 3056 return (0); 3057 3058 #if defined(sun) 3059 /* 3060 * See comment in DIF_VAR_PID. 3061 */ 3062 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3063 return (pid0.pid_id); 3064 3065 /* 3066 * It is always safe to dereference one's own t_procp pointer: 3067 * it always points to a valid, allocated proc structure. 3068 * (This is true because threads don't clean up their own 3069 * state -- they leave that task to whomever reaps them.) 3070 */ 3071 return ((uint64_t)curthread->t_procp->p_ppid); 3072 #else 3073 return ((uint64_t)curproc->p_pptr->p_pid); 3074 #endif 3075 3076 case DIF_VAR_TID: 3077 #if defined(sun) 3078 /* 3079 * See comment in DIF_VAR_PID. 3080 */ 3081 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3082 return (0); 3083 #endif 3084 3085 return ((uint64_t)curthread->t_tid); 3086 3087 case DIF_VAR_EXECARGS: { 3088 struct pargs *p_args = curthread->td_proc->p_args; 3089 3090 if (p_args == NULL) 3091 return (0); 3092 3093 return (dtrace_dif_varstrz( 3094 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3095 } 3096 3097 case DIF_VAR_EXECNAME: 3098 #if defined(sun) 3099 if (!dtrace_priv_proc(state)) 3100 return (0); 3101 3102 /* 3103 * See comment in DIF_VAR_PID. 3104 */ 3105 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3106 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3107 3108 /* 3109 * It is always safe to dereference one's own t_procp pointer: 3110 * it always points to a valid, allocated proc structure. 3111 * (This is true because threads don't clean up their own 3112 * state -- they leave that task to whomever reaps them.) 3113 */ 3114 return (dtrace_dif_varstr( 3115 (uintptr_t)curthread->t_procp->p_user.u_comm, 3116 state, mstate)); 3117 #else 3118 return (dtrace_dif_varstr( 3119 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3120 #endif 3121 3122 case DIF_VAR_ZONENAME: 3123 #if defined(sun) 3124 if (!dtrace_priv_proc(state)) 3125 return (0); 3126 3127 /* 3128 * See comment in DIF_VAR_PID. 3129 */ 3130 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3131 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3132 3133 /* 3134 * It is always safe to dereference one's own t_procp pointer: 3135 * it always points to a valid, allocated proc structure. 3136 * (This is true because threads don't clean up their own 3137 * state -- they leave that task to whomever reaps them.) 3138 */ 3139 return (dtrace_dif_varstr( 3140 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3141 state, mstate)); 3142 #else 3143 return (0); 3144 #endif 3145 3146 case DIF_VAR_UID: 3147 if (!dtrace_priv_proc(state)) 3148 return (0); 3149 3150 #if defined(sun) 3151 /* 3152 * See comment in DIF_VAR_PID.
3153 */ 3154 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3155 return ((uint64_t)p0.p_cred->cr_uid); 3156 #endif 3157 3158 /* 3159 * It is always safe to dereference one's own t_procp pointer: 3160 * it always points to a valid, allocated proc structure. 3161 * (This is true because threads don't clean up their own 3162 * state -- they leave that task to whomever reaps them.) 3163 * 3164 * Additionally, it is safe to dereference one's own process 3165 * credential, since this is never NULL after process birth. 3166 */ 3167 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3168 3169 case DIF_VAR_GID: 3170 if (!dtrace_priv_proc(state)) 3171 return (0); 3172 3173 #if defined(sun) 3174 /* 3175 * See comment in DIF_VAR_PID. 3176 */ 3177 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3178 return ((uint64_t)p0.p_cred->cr_gid); 3179 #endif 3180 3181 /* 3182 * It is always safe to dereference one's own t_procp pointer: 3183 * it always points to a valid, allocated proc structure. 3184 * (This is true because threads don't clean up their own 3185 * state -- they leave that task to whomever reaps them.) 3186 * 3187 * Additionally, it is safe to dereference one's own process 3188 * credential, since this is never NULL after process birth. 3189 */ 3190 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3191 3192 case DIF_VAR_ERRNO: { 3193 #if defined(sun) 3194 klwp_t *lwp; 3195 if (!dtrace_priv_proc(state)) 3196 return (0); 3197 3198 /* 3199 * See comment in DIF_VAR_PID. 3200 */ 3201 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3202 return (0); 3203 3204 /* 3205 * It is always safe to dereference one's own t_lwp pointer in 3206 * the event that this pointer is non-NULL. (This is true 3207 * because threads and lwps don't clean up their own state -- 3208 * they leave that task to whomever reaps them.) 3209 */ 3210 if ((lwp = curthread->t_lwp) == NULL) 3211 return (0); 3212 3213 return ((uint64_t)lwp->lwp_errno); 3214 #else 3215 return (curthread->td_errno); 3216 #endif 3217 } 3218 #if !defined(sun) 3219 case DIF_VAR_CPU: { 3220 return curcpu; 3221 } 3222 #endif 3223 default: 3224 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3225 return (0); 3226 } 3227 } 3228 3229 /* 3230 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3231 * Notice that we don't bother validating the proper number of arguments or 3232 * their types in the tuple stack. This isn't needed because all argument 3233 * interpretation is safe because of our load safety -- the worst that can 3234 * happen is that a bogus program can obtain bogus results. 
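 * (As an illustration: if errant DIF hands a wild pointer to strlen(),
 * the DIF_SUBR_STRLEN case below performs only the usual fault-protected
 * probe-context loads, bounded by the strsize option -- the returned
 * length may be meaningless, but the kernel itself is unharmed.)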
3235 */ 3236 static void 3237 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3238 dtrace_key_t *tupregs, int nargs, 3239 dtrace_mstate_t *mstate, dtrace_state_t *state) 3240 { 3241 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3242 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3243 dtrace_vstate_t *vstate = &state->dts_vstate; 3244 3245 #if defined(sun) 3246 union { 3247 mutex_impl_t mi; 3248 uint64_t mx; 3249 } m; 3250 3251 union { 3252 krwlock_t ri; 3253 uintptr_t rw; 3254 } r; 3255 #else 3256 struct thread *lowner; 3257 union { 3258 struct lock_object *li; 3259 uintptr_t lx; 3260 } l; 3261 #endif 3262 3263 switch (subr) { 3264 case DIF_SUBR_RAND: 3265 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3266 break; 3267 3268 #if defined(sun) 3269 case DIF_SUBR_MUTEX_OWNED: 3270 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3271 mstate, vstate)) { 3272 regs[rd] = 0; 3273 break; 3274 } 3275 3276 m.mx = dtrace_load64(tupregs[0].dttk_value); 3277 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3278 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3279 else 3280 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3281 break; 3282 3283 case DIF_SUBR_MUTEX_OWNER: 3284 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3285 mstate, vstate)) { 3286 regs[rd] = 0; 3287 break; 3288 } 3289 3290 m.mx = dtrace_load64(tupregs[0].dttk_value); 3291 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3292 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3293 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3294 else 3295 regs[rd] = 0; 3296 break; 3297 3298 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3299 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3300 mstate, vstate)) { 3301 regs[rd] = 0; 3302 break; 3303 } 3304 3305 m.mx = dtrace_load64(tupregs[0].dttk_value); 3306 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3307 break; 3308 3309 case DIF_SUBR_MUTEX_TYPE_SPIN: 3310 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3311 mstate, vstate)) { 3312 regs[rd] = 0; 3313 break; 3314 } 3315 3316 m.mx = dtrace_load64(tupregs[0].dttk_value); 3317 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3318 break; 3319 3320 case DIF_SUBR_RW_READ_HELD: { 3321 uintptr_t tmp; 3322 3323 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3324 mstate, vstate)) { 3325 regs[rd] = 0; 3326 break; 3327 } 3328 3329 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3330 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3331 break; 3332 } 3333 3334 case DIF_SUBR_RW_WRITE_HELD: 3335 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3336 mstate, vstate)) { 3337 regs[rd] = 0; 3338 break; 3339 } 3340 3341 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3342 regs[rd] = _RW_WRITE_HELD(&r.ri); 3343 break; 3344 3345 case DIF_SUBR_RW_ISWRITER: 3346 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3347 mstate, vstate)) { 3348 regs[rd] = 0; 3349 break; 3350 } 3351 3352 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3353 regs[rd] = _RW_ISWRITER(&r.ri); 3354 break; 3355 3356 #else 3357 case DIF_SUBR_MUTEX_OWNED: 3358 if (!dtrace_canload(tupregs[0].dttk_value, 3359 sizeof (struct lock_object), mstate, vstate)) { 3360 regs[rd] = 0; 3361 break; 3362 } 3363 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3364 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3365 break; 3366 3367 case DIF_SUBR_MUTEX_OWNER: 3368 if (!dtrace_canload(tupregs[0].dttk_value, 3369 sizeof (struct lock_object), mstate, vstate)) { 3370 regs[rd] = 0; 3371 break; 3372 } 3373 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3374 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3375 regs[rd] = (uintptr_t)lowner; 3376 break; 3377 3378 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3379 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3380 mstate, vstate)) { 3381 regs[rd] = 0; 3382 break; 3383 } 3384 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3385 /* XXX - should be only LC_SLEEPABLE? */ 3386 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3387 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3388 break; 3389 3390 case DIF_SUBR_MUTEX_TYPE_SPIN: 3391 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3392 mstate, vstate)) { 3393 regs[rd] = 0; 3394 break; 3395 } 3396 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3397 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3398 break; 3399 3400 case DIF_SUBR_RW_READ_HELD: 3401 case DIF_SUBR_SX_SHARED_HELD: 3402 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3403 mstate, vstate)) { 3404 regs[rd] = 0; 3405 break; 3406 } 3407 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3408 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3409 lowner == NULL; 3410 break; 3411 3412 case DIF_SUBR_RW_WRITE_HELD: 3413 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3414 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3415 mstate, vstate)) { 3416 regs[rd] = 0; 3417 break; 3418 } 3419 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3420 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3421 regs[rd] = (lowner == curthread); 3422 break; 3423 3424 case DIF_SUBR_RW_ISWRITER: 3425 case DIF_SUBR_SX_ISEXCLUSIVE: 3426 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3427 mstate, vstate)) { 3428 regs[rd] = 0; 3429 break; 3430 } 3431 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3432 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3433 lowner != NULL; 3434 break; 3435 #endif /* ! defined(sun) */ 3436 3437 case DIF_SUBR_BCOPY: { 3438 /* 3439 * We need to be sure that the destination is in the scratch 3440 * region -- no other region is allowed. 3441 */ 3442 uintptr_t src = tupregs[0].dttk_value; 3443 uintptr_t dest = tupregs[1].dttk_value; 3444 size_t size = tupregs[2].dttk_value; 3445 3446 if (!dtrace_inscratch(dest, size, mstate)) { 3447 *flags |= CPU_DTRACE_BADADDR; 3448 *illval = regs[rd]; 3449 break; 3450 } 3451 3452 if (!dtrace_canload(src, size, mstate, vstate)) { 3453 regs[rd] = 0; 3454 break; 3455 } 3456 3457 dtrace_bcopy((void *)src, (void *)dest, size); 3458 break; 3459 } 3460 3461 case DIF_SUBR_ALLOCA: 3462 case DIF_SUBR_COPYIN: { 3463 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3464 uint64_t size = 3465 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3466 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3467 3468 /* 3469 * This action doesn't require any credential checks since 3470 * probes will not activate in user contexts to which the 3471 * enabling user does not have permissions. 3472 */ 3473 3474 /* 3475 * Rounding up the user allocation size could have overflowed 3476 * a large, bogus allocation (like -1ULL) to 0. 
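 * (Worked example: if the requested size is (uint64_t)-1 and the scratch
 * pointer happened to need 7 bytes of padding to reach 8-byte alignment,
 * scratch_size wraps around to 6 -- which is less than size, so the check
 * below catches the overflow and refuses the allocation.)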
3477 */ 3478 if (scratch_size < size || 3479 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3480 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3481 regs[rd] = 0; 3482 break; 3483 } 3484 3485 if (subr == DIF_SUBR_COPYIN) { 3486 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3487 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3488 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3489 } 3490 3491 mstate->dtms_scratch_ptr += scratch_size; 3492 regs[rd] = dest; 3493 break; 3494 } 3495 3496 case DIF_SUBR_COPYINTO: { 3497 uint64_t size = tupregs[1].dttk_value; 3498 uintptr_t dest = tupregs[2].dttk_value; 3499 3500 /* 3501 * This action doesn't require any credential checks since 3502 * probes will not activate in user contexts to which the 3503 * enabling user does not have permissions. 3504 */ 3505 if (!dtrace_inscratch(dest, size, mstate)) { 3506 *flags |= CPU_DTRACE_BADADDR; 3507 *illval = regs[rd]; 3508 break; 3509 } 3510 3511 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3512 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3513 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3514 break; 3515 } 3516 3517 case DIF_SUBR_COPYINSTR: { 3518 uintptr_t dest = mstate->dtms_scratch_ptr; 3519 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3520 3521 if (nargs > 1 && tupregs[1].dttk_value < size) 3522 size = tupregs[1].dttk_value + 1; 3523 3524 /* 3525 * This action doesn't require any credential checks since 3526 * probes will not activate in user contexts to which the 3527 * enabling user does not have permissions. 3528 */ 3529 if (!DTRACE_INSCRATCH(mstate, size)) { 3530 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3531 regs[rd] = 0; 3532 break; 3533 } 3534 3535 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3536 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3537 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3538 3539 ((char *)dest)[size - 1] = '\0'; 3540 mstate->dtms_scratch_ptr += size; 3541 regs[rd] = dest; 3542 break; 3543 } 3544 3545 #if defined(sun) 3546 case DIF_SUBR_MSGSIZE: 3547 case DIF_SUBR_MSGDSIZE: { 3548 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3549 uintptr_t wptr, rptr; 3550 size_t count = 0; 3551 int cont = 0; 3552 3553 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3554 3555 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3556 vstate)) { 3557 regs[rd] = 0; 3558 break; 3559 } 3560 3561 wptr = dtrace_loadptr(baddr + 3562 offsetof(mblk_t, b_wptr)); 3563 3564 rptr = dtrace_loadptr(baddr + 3565 offsetof(mblk_t, b_rptr)); 3566 3567 if (wptr < rptr) { 3568 *flags |= CPU_DTRACE_BADADDR; 3569 *illval = tupregs[0].dttk_value; 3570 break; 3571 } 3572 3573 daddr = dtrace_loadptr(baddr + 3574 offsetof(mblk_t, b_datap)); 3575 3576 baddr = dtrace_loadptr(baddr + 3577 offsetof(mblk_t, b_cont)); 3578 3579 /* 3580 * We want to prevent against denial-of-service here, 3581 * so we're only going to search the list for 3582 * dtrace_msgdsize_max mblks. 
3583 */ 3584 if (cont++ > dtrace_msgdsize_max) { 3585 *flags |= CPU_DTRACE_ILLOP; 3586 break; 3587 } 3588 3589 if (subr == DIF_SUBR_MSGDSIZE) { 3590 if (dtrace_load8(daddr + 3591 offsetof(dblk_t, db_type)) != M_DATA) 3592 continue; 3593 } 3594 3595 count += wptr - rptr; 3596 } 3597 3598 if (!(*flags & CPU_DTRACE_FAULT)) 3599 regs[rd] = count; 3600 3601 break; 3602 } 3603 #endif 3604 3605 case DIF_SUBR_PROGENYOF: { 3606 pid_t pid = tupregs[0].dttk_value; 3607 proc_t *p; 3608 int rval = 0; 3609 3610 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3611 3612 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3613 #if defined(sun) 3614 if (p->p_pidp->pid_id == pid) { 3615 #else 3616 if (p->p_pid == pid) { 3617 #endif 3618 rval = 1; 3619 break; 3620 } 3621 } 3622 3623 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3624 3625 regs[rd] = rval; 3626 break; 3627 } 3628 3629 case DIF_SUBR_SPECULATION: 3630 regs[rd] = dtrace_speculation(state); 3631 break; 3632 3633 case DIF_SUBR_COPYOUT: { 3634 uintptr_t kaddr = tupregs[0].dttk_value; 3635 uintptr_t uaddr = tupregs[1].dttk_value; 3636 uint64_t size = tupregs[2].dttk_value; 3637 3638 if (!dtrace_destructive_disallow && 3639 dtrace_priv_proc_control(state) && 3640 !dtrace_istoxic(kaddr, size)) { 3641 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3642 dtrace_copyout(kaddr, uaddr, size, flags); 3643 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3644 } 3645 break; 3646 } 3647 3648 case DIF_SUBR_COPYOUTSTR: { 3649 uintptr_t kaddr = tupregs[0].dttk_value; 3650 uintptr_t uaddr = tupregs[1].dttk_value; 3651 uint64_t size = tupregs[2].dttk_value; 3652 3653 if (!dtrace_destructive_disallow && 3654 dtrace_priv_proc_control(state) && 3655 !dtrace_istoxic(kaddr, size)) { 3656 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3657 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3658 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3659 } 3660 break; 3661 } 3662 3663 case DIF_SUBR_STRLEN: { 3664 size_t sz; 3665 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3666 sz = dtrace_strlen((char *)addr, 3667 state->dts_options[DTRACEOPT_STRSIZE]); 3668 3669 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3670 regs[rd] = 0; 3671 break; 3672 } 3673 3674 regs[rd] = sz; 3675 3676 break; 3677 } 3678 3679 case DIF_SUBR_STRCHR: 3680 case DIF_SUBR_STRRCHR: { 3681 /* 3682 * We're going to iterate over the string looking for the 3683 * specified character. We will iterate until we have reached 3684 * the string length or we have found the character. If this 3685 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3686 * of the specified character instead of the first. 3687 */ 3688 uintptr_t saddr = tupregs[0].dttk_value; 3689 uintptr_t addr = tupregs[0].dttk_value; 3690 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3691 char c, target = (char)tupregs[1].dttk_value; 3692 3693 for (regs[rd] = 0; addr < limit; addr++) { 3694 if ((c = dtrace_load8(addr)) == target) { 3695 regs[rd] = addr; 3696 3697 if (subr == DIF_SUBR_STRCHR) 3698 break; 3699 } 3700 3701 if (c == '\0') 3702 break; 3703 } 3704 3705 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3706 regs[rd] = 0; 3707 break; 3708 } 3709 3710 break; 3711 } 3712 3713 case DIF_SUBR_STRSTR: 3714 case DIF_SUBR_INDEX: 3715 case DIF_SUBR_RINDEX: { 3716 /* 3717 * We're going to iterate over the string looking for the 3718 * specified string. We will iterate until we have reached 3719 * the string length or we have found the string. 
(Yes, this 3720 * is done in the most naive way possible -- but considering 3721 * that the string we're searching for is likely to be 3722 * relatively short, the complexity of Rabin-Karp or similar 3723 * hardly seems merited.) 3724 */ 3725 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3726 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3727 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3728 size_t len = dtrace_strlen(addr, size); 3729 size_t sublen = dtrace_strlen(substr, size); 3730 char *limit = addr + len, *orig = addr; 3731 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3732 int inc = 1; 3733 3734 regs[rd] = notfound; 3735 3736 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3737 regs[rd] = 0; 3738 break; 3739 } 3740 3741 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3742 vstate)) { 3743 regs[rd] = 0; 3744 break; 3745 } 3746 3747 /* 3748 * strstr() and index()/rindex() have similar semantics if 3749 * both strings are the empty string: strstr() returns a 3750 * pointer to the (empty) string, and index() and rindex() 3751 * both return index 0 (regardless of any position argument). 3752 */ 3753 if (sublen == 0 && len == 0) { 3754 if (subr == DIF_SUBR_STRSTR) 3755 regs[rd] = (uintptr_t)addr; 3756 else 3757 regs[rd] = 0; 3758 break; 3759 } 3760 3761 if (subr != DIF_SUBR_STRSTR) { 3762 if (subr == DIF_SUBR_RINDEX) { 3763 limit = orig - 1; 3764 addr += len; 3765 inc = -1; 3766 } 3767 3768 /* 3769 * Both index() and rindex() take an optional position 3770 * argument that denotes the starting position. 3771 */ 3772 if (nargs == 3) { 3773 int64_t pos = (int64_t)tupregs[2].dttk_value; 3774 3775 /* 3776 * If the position argument to index() is 3777 * negative, Perl implicitly clamps it at 3778 * zero. This semantic is a little surprising 3779 * given the special meaning of negative 3780 * positions to similar Perl functions like 3781 * substr(), but it appears to reflect a 3782 * notion that index() can start from a 3783 * negative index and increment its way up to 3784 * the string. Given this notion, Perl's 3785 * rindex() is at least self-consistent in 3786 * that it implicitly clamps positions greater 3787 * than the string length to be the string 3788 * length. Where Perl completely loses 3789 * coherence, however, is when the specified 3790 * substring is the empty string (""). In 3791 * this case, even if the position is 3792 * negative, rindex() returns 0 -- and even if 3793 * the position is greater than the length, 3794 * index() returns the string length. These 3795 * semantics violate the notion that index() 3796 * should never return a value less than the 3797 * specified position and that rindex() should 3798 * never return a value greater than the 3799 * specified position. (One assumes that 3800 * these semantics are artifacts of Perl's 3801 * implementation and not the results of 3802 * deliberate design -- it beggars belief that 3803 * even Larry Wall could desire such oddness.) 3804 * While in the abstract one would wish for 3805 * consistent position semantics across 3806 * substr(), index() and rindex() -- or at the 3807 * very least self-consistent position 3808 * semantics for index() and rindex() -- we 3809 * instead opt to keep with the extant Perl 3810 * semantics, in all their broken glory. (Do 3811 * we have more desire to maintain Perl's 3812 * semantics than Perl does? Probably.) 
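 * (Concretely, given the clamping below: index("foo", "", 10) evaluates
 * to 3 -- the string length -- and rindex("foo", "", -2) evaluates to 0,
 * precisely the Perl oddities described above.)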
3813 */ 3814 if (subr == DIF_SUBR_RINDEX) { 3815 if (pos < 0) { 3816 if (sublen == 0) 3817 regs[rd] = 0; 3818 break; 3819 } 3820 3821 if (pos > len) 3822 pos = len; 3823 } else { 3824 if (pos < 0) 3825 pos = 0; 3826 3827 if (pos >= len) { 3828 if (sublen == 0) 3829 regs[rd] = len; 3830 break; 3831 } 3832 } 3833 3834 addr = orig + pos; 3835 } 3836 } 3837 3838 for (regs[rd] = notfound; addr != limit; addr += inc) { 3839 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3840 if (subr != DIF_SUBR_STRSTR) { 3841 /* 3842 * As D index() and rindex() are 3843 * modeled on Perl (and not on awk), 3844 * we return a zero-based (and not a 3845 * one-based) index. (For you Perl 3846 * weenies: no, we're not going to add 3847 * $[ -- and shouldn't you be at a con 3848 * or something?) 3849 */ 3850 regs[rd] = (uintptr_t)(addr - orig); 3851 break; 3852 } 3853 3854 ASSERT(subr == DIF_SUBR_STRSTR); 3855 regs[rd] = (uintptr_t)addr; 3856 break; 3857 } 3858 } 3859 3860 break; 3861 } 3862 3863 case DIF_SUBR_STRTOK: { 3864 uintptr_t addr = tupregs[0].dttk_value; 3865 uintptr_t tokaddr = tupregs[1].dttk_value; 3866 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3867 uintptr_t limit, toklimit = tokaddr + size; 3868 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3869 char *dest = (char *)mstate->dtms_scratch_ptr; 3870 int i; 3871 3872 /* 3873 * Check both the token buffer and (later) the input buffer, 3874 * since both could be non-scratch addresses. 3875 */ 3876 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3877 regs[rd] = 0; 3878 break; 3879 } 3880 3881 if (!DTRACE_INSCRATCH(mstate, size)) { 3882 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3883 regs[rd] = 0; 3884 break; 3885 } 3886 3887 if (addr == 0) { 3888 /* 3889 * If the address specified is NULL, we use our saved 3890 * strtok pointer from the mstate. Note that this 3891 * means that the saved strtok pointer is _only_ 3892 * valid within multiple enablings of the same probe -- 3893 * it behaves like an implicit clause-local variable. 3894 */ 3895 addr = mstate->dtms_strtok; 3896 } else { 3897 /* 3898 * If the user-specified address is non-NULL we must 3899 * access check it. This is the only time we have 3900 * a chance to do so, since this address may reside 3901 * in the string table of this clause-- future calls 3902 * (when we fetch addr from mstate->dtms_strtok) 3903 * would fail this access check. 3904 */ 3905 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3906 regs[rd] = 0; 3907 break; 3908 } 3909 } 3910 3911 /* 3912 * First, zero the token map, and then process the token 3913 * string -- setting a bit in the map for every character 3914 * found in the token string. 3915 */ 3916 for (i = 0; i < sizeof (tokmap); i++) 3917 tokmap[i] = 0; 3918 3919 for (; tokaddr < toklimit; tokaddr++) { 3920 if ((c = dtrace_load8(tokaddr)) == '\0') 3921 break; 3922 3923 ASSERT((c >> 3) < sizeof (tokmap)); 3924 tokmap[c >> 3] |= (1 << (c & 0x7)); 3925 } 3926 3927 for (limit = addr + size; addr < limit; addr++) { 3928 /* 3929 * We're looking for a character that is _not_ contained 3930 * in the token string. 3931 */ 3932 if ((c = dtrace_load8(addr)) == '\0') 3933 break; 3934 3935 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3936 break; 3937 } 3938 3939 if (c == '\0') { 3940 /* 3941 * We reached the end of the string without finding 3942 * any character that was not in the token string. 3943 * We return NULL in this case, and we set the saved 3944 * address to NULL as well. 
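* (For illustration: strtok("///", "/") takes exactly this path --
* every character of the input is a delimiter -- and so returns NULL.)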
3945 */ 3946 regs[rd] = 0; 3947 mstate->dtms_strtok = 0; 3948 break; 3949 } 3950 3951 /* 3952 * From here on, we're copying into the destination string. 3953 */ 3954 for (i = 0; addr < limit && i < size - 1; addr++) { 3955 if ((c = dtrace_load8(addr)) == '\0') 3956 break; 3957 3958 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3959 break; 3960 3961 ASSERT(i < size); 3962 dest[i++] = c; 3963 } 3964 3965 ASSERT(i < size); 3966 dest[i] = '\0'; 3967 regs[rd] = (uintptr_t)dest; 3968 mstate->dtms_scratch_ptr += size; 3969 mstate->dtms_strtok = addr; 3970 break; 3971 } 3972 3973 case DIF_SUBR_SUBSTR: { 3974 uintptr_t s = tupregs[0].dttk_value; 3975 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3976 char *d = (char *)mstate->dtms_scratch_ptr; 3977 int64_t index = (int64_t)tupregs[1].dttk_value; 3978 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3979 size_t len = dtrace_strlen((char *)s, size); 3980 int64_t i = 0; 3981 3982 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3983 regs[rd] = 0; 3984 break; 3985 } 3986 3987 if (!DTRACE_INSCRATCH(mstate, size)) { 3988 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3989 regs[rd] = 0; 3990 break; 3991 } 3992 3993 if (nargs <= 2) 3994 remaining = (int64_t)size; 3995 3996 if (index < 0) { 3997 index += len; 3998 3999 if (index < 0 && index + remaining > 0) { 4000 remaining += index; 4001 index = 0; 4002 } 4003 } 4004 4005 if (index >= len || index < 0) { 4006 remaining = 0; 4007 } else if (remaining < 0) { 4008 remaining += len - index; 4009 } else if (index + remaining > size) { 4010 remaining = size - index; 4011 } 4012 4013 for (i = 0; i < remaining; i++) { 4014 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4015 break; 4016 } 4017 4018 d[i] = '\0'; 4019 4020 mstate->dtms_scratch_ptr += size; 4021 regs[rd] = (uintptr_t)d; 4022 break; 4023 } 4024 4025 case DIF_SUBR_TOUPPER: 4026 case DIF_SUBR_TOLOWER: { 4027 uintptr_t s = tupregs[0].dttk_value; 4028 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4029 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4030 size_t len = dtrace_strlen((char *)s, size); 4031 char lower, upper, convert; 4032 int64_t i; 4033 4034 if (subr == DIF_SUBR_TOUPPER) { 4035 lower = 'a'; 4036 upper = 'z'; 4037 convert = 'A'; 4038 } else { 4039 lower = 'A'; 4040 upper = 'Z'; 4041 convert = 'a'; 4042 } 4043 4044 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4045 regs[rd] = 0; 4046 break; 4047 } 4048 4049 if (!DTRACE_INSCRATCH(mstate, size)) { 4050 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4051 regs[rd] = 0; 4052 break; 4053 } 4054 4055 for (i = 0; i < size - 1; i++) { 4056 if ((c = dtrace_load8(s + i)) == '\0') 4057 break; 4058 4059 if (c >= lower && c <= upper) 4060 c = convert + (c - lower); 4061 4062 dest[i] = c; 4063 } 4064 4065 ASSERT(i < size); 4066 dest[i] = '\0'; 4067 regs[rd] = (uintptr_t)dest; 4068 mstate->dtms_scratch_ptr += size; 4069 break; 4070 } 4071 4072 #if defined(sun) 4073 case DIF_SUBR_GETMAJOR: 4074 #ifdef _LP64 4075 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4076 #else 4077 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4078 #endif 4079 break; 4080 4081 case DIF_SUBR_GETMINOR: 4082 #ifdef _LP64 4083 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4084 #else 4085 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4086 #endif 4087 break; 4088 4089 case DIF_SUBR_DDI_PATHNAME: { 4090 /* 4091 * This one is a galactic mess. 
We are going to roughly 4092 * emulate ddi_pathname(), but it's made more complicated 4093 * by the fact that we (a) want to include the minor name and 4094 * (b) must proceed iteratively instead of recursively. 4095 */ 4096 uintptr_t dest = mstate->dtms_scratch_ptr; 4097 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4098 char *start = (char *)dest, *end = start + size - 1; 4099 uintptr_t daddr = tupregs[0].dttk_value; 4100 int64_t minor = (int64_t)tupregs[1].dttk_value; 4101 char *s; 4102 int i, len, depth = 0; 4103 4104 /* 4105 * Due to all the pointer jumping we do and context we must 4106 * rely upon, we just mandate that the user must have kernel 4107 * read privileges to use this routine. 4108 */ 4109 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4110 *flags |= CPU_DTRACE_KPRIV; 4111 *illval = daddr; 4112 regs[rd] = 0; 4113 } 4114 4115 if (!DTRACE_INSCRATCH(mstate, size)) { 4116 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4117 regs[rd] = 0; 4118 break; 4119 } 4120 4121 *end = '\0'; 4122 4123 /* 4124 * We want to have a name for the minor. In order to do this, 4125 * we need to walk the minor list from the devinfo. We want 4126 * to be sure that we don't infinitely walk a circular list, 4127 * so we check for circularity by sending a scout pointer 4128 * ahead two elements for every element that we iterate over; 4129 * if the list is circular, these will ultimately point to the 4130 * same element. You may recognize this little trick as the 4131 * answer to a stupid interview question -- one that always 4132 * seems to be asked by those who had to have it laboriously 4133 * explained to them, and who can't even concisely describe 4134 * the conditions under which one would be forced to resort to 4135 * this technique. Needless to say, those conditions are 4136 * found here -- and probably only here. Is this the only use 4137 * of this infamous trick in shipping, production code? If it 4138 * isn't, it probably should be... 4139 */ 4140 if (minor != -1) { 4141 uintptr_t maddr = dtrace_loadptr(daddr + 4142 offsetof(struct dev_info, devi_minor)); 4143 4144 uintptr_t next = offsetof(struct ddi_minor_data, next); 4145 uintptr_t name = offsetof(struct ddi_minor_data, 4146 d_minor) + offsetof(struct ddi_minor, name); 4147 uintptr_t dev = offsetof(struct ddi_minor_data, 4148 d_minor) + offsetof(struct ddi_minor, dev); 4149 uintptr_t scout; 4150 4151 if (maddr != NULL) 4152 scout = dtrace_loadptr(maddr + next); 4153 4154 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4155 uint64_t m; 4156 #ifdef _LP64 4157 m = dtrace_load64(maddr + dev) & MAXMIN64; 4158 #else 4159 m = dtrace_load32(maddr + dev) & MAXMIN; 4160 #endif 4161 if (m != minor) { 4162 maddr = dtrace_loadptr(maddr + next); 4163 4164 if (scout == NULL) 4165 continue; 4166 4167 scout = dtrace_loadptr(scout + next); 4168 4169 if (scout == NULL) 4170 continue; 4171 4172 scout = dtrace_loadptr(scout + next); 4173 4174 if (scout == NULL) 4175 continue; 4176 4177 if (scout == maddr) { 4178 *flags |= CPU_DTRACE_ILLOP; 4179 break; 4180 } 4181 4182 continue; 4183 } 4184 4185 /* 4186 * We have the minor data. Now we need to 4187 * copy the minor's name into the end of the 4188 * pathname. 
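* (As a rough picture of the layout: the path is assembled
* back-to-front in the scratch buffer, so the finished string looks
* something like "/devices/.../<node>@<addr>:<minorname>", and this
* step contributes the trailing ":<minorname>".)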
4189 */ 4190 s = (char *)dtrace_loadptr(maddr + name); 4191 len = dtrace_strlen(s, size); 4192 4193 if (*flags & CPU_DTRACE_FAULT) 4194 break; 4195 4196 if (len != 0) { 4197 if ((end -= (len + 1)) < start) 4198 break; 4199 4200 *end = ':'; 4201 } 4202 4203 for (i = 1; i <= len; i++) 4204 end[i] = dtrace_load8((uintptr_t)s++); 4205 break; 4206 } 4207 } 4208 4209 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4210 ddi_node_state_t devi_state; 4211 4212 devi_state = dtrace_load32(daddr + 4213 offsetof(struct dev_info, devi_node_state)); 4214 4215 if (*flags & CPU_DTRACE_FAULT) 4216 break; 4217 4218 if (devi_state >= DS_INITIALIZED) { 4219 s = (char *)dtrace_loadptr(daddr + 4220 offsetof(struct dev_info, devi_addr)); 4221 len = dtrace_strlen(s, size); 4222 4223 if (*flags & CPU_DTRACE_FAULT) 4224 break; 4225 4226 if (len != 0) { 4227 if ((end -= (len + 1)) < start) 4228 break; 4229 4230 *end = '@'; 4231 } 4232 4233 for (i = 1; i <= len; i++) 4234 end[i] = dtrace_load8((uintptr_t)s++); 4235 } 4236 4237 /* 4238 * Now for the node name... 4239 */ 4240 s = (char *)dtrace_loadptr(daddr + 4241 offsetof(struct dev_info, devi_node_name)); 4242 4243 daddr = dtrace_loadptr(daddr + 4244 offsetof(struct dev_info, devi_parent)); 4245 4246 /* 4247 * If our parent is NULL (that is, if we're the root 4248 * node), we're going to use the special path 4249 * "devices". 4250 */ 4251 if (daddr == 0) 4252 s = "devices"; 4253 4254 len = dtrace_strlen(s, size); 4255 if (*flags & CPU_DTRACE_FAULT) 4256 break; 4257 4258 if ((end -= (len + 1)) < start) 4259 break; 4260 4261 for (i = 1; i <= len; i++) 4262 end[i] = dtrace_load8((uintptr_t)s++); 4263 *end = '/'; 4264 4265 if (depth++ > dtrace_devdepth_max) { 4266 *flags |= CPU_DTRACE_ILLOP; 4267 break; 4268 } 4269 } 4270 4271 if (end < start) 4272 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4273 4274 if (daddr == 0) { 4275 regs[rd] = (uintptr_t)end; 4276 mstate->dtms_scratch_ptr += size; 4277 } 4278 4279 break; 4280 } 4281 #endif 4282 4283 case DIF_SUBR_STRJOIN: { 4284 char *d = (char *)mstate->dtms_scratch_ptr; 4285 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4286 uintptr_t s1 = tupregs[0].dttk_value; 4287 uintptr_t s2 = tupregs[1].dttk_value; 4288 int i = 0; 4289 4290 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4291 !dtrace_strcanload(s2, size, mstate, vstate)) { 4292 regs[rd] = 0; 4293 break; 4294 } 4295 4296 if (!DTRACE_INSCRATCH(mstate, size)) { 4297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4298 regs[rd] = 0; 4299 break; 4300 } 4301 4302 for (;;) { 4303 if (i >= size) { 4304 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4305 regs[rd] = 0; 4306 break; 4307 } 4308 4309 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4310 i--; 4311 break; 4312 } 4313 } 4314 4315 for (;;) { 4316 if (i >= size) { 4317 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4318 regs[rd] = 0; 4319 break; 4320 } 4321 4322 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4323 break; 4324 } 4325 4326 if (i < size) { 4327 mstate->dtms_scratch_ptr += i; 4328 regs[rd] = (uintptr_t)d; 4329 } 4330 4331 break; 4332 } 4333 4334 case DIF_SUBR_LLTOSTR: { 4335 int64_t i = (int64_t)tupregs[0].dttk_value; 4336 uint64_t val, digit; 4337 uint64_t size = 65; /* enough room for 2^64 in binary */ 4338 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4339 int base = 10; 4340 4341 if (nargs > 1) { 4342 if ((base = tupregs[1].dttk_value) <= 1 || 4343 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4344 *flags |= CPU_DTRACE_ILLOP; 4345 break; 4346 } 4347 } 4348 4349 val = (base == 10 && i < 0) ? 
i * -1 : i; 4350 4351 if (!DTRACE_INSCRATCH(mstate, size)) { 4352 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4353 regs[rd] = 0; 4354 break; 4355 } 4356 4357 for (*end-- = '\0'; val; val /= base) { 4358 if ((digit = val % base) <= '9' - '0') { 4359 *end-- = '0' + digit; 4360 } else { 4361 *end-- = 'a' + (digit - ('9' - '0') - 1); 4362 } 4363 } 4364 4365 if (i == 0 && base == 16) 4366 *end-- = '0'; 4367 4368 if (base == 16) 4369 *end-- = 'x'; 4370 4371 if (i == 0 || base == 8 || base == 16) 4372 *end-- = '0'; 4373 4374 if (i < 0 && base == 10) 4375 *end-- = '-'; 4376 4377 regs[rd] = (uintptr_t)end + 1; 4378 mstate->dtms_scratch_ptr += size; 4379 break; 4380 } 4381 4382 case DIF_SUBR_HTONS: 4383 case DIF_SUBR_NTOHS: 4384 #if BYTE_ORDER == BIG_ENDIAN 4385 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4386 #else 4387 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4388 #endif 4389 break; 4390 4391 4392 case DIF_SUBR_HTONL: 4393 case DIF_SUBR_NTOHL: 4394 #if BYTE_ORDER == BIG_ENDIAN 4395 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4396 #else 4397 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4398 #endif 4399 break; 4400 4401 4402 case DIF_SUBR_HTONLL: 4403 case DIF_SUBR_NTOHLL: 4404 #if BYTE_ORDER == BIG_ENDIAN 4405 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4406 #else 4407 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4408 #endif 4409 break; 4410 4411 4412 case DIF_SUBR_DIRNAME: 4413 case DIF_SUBR_BASENAME: { 4414 char *dest = (char *)mstate->dtms_scratch_ptr; 4415 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4416 uintptr_t src = tupregs[0].dttk_value; 4417 int i, j, len = dtrace_strlen((char *)src, size); 4418 int lastbase = -1, firstbase = -1, lastdir = -1; 4419 int start, end; 4420 4421 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4422 regs[rd] = 0; 4423 break; 4424 } 4425 4426 if (!DTRACE_INSCRATCH(mstate, size)) { 4427 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4428 regs[rd] = 0; 4429 break; 4430 } 4431 4432 /* 4433 * The basename and dirname for a zero-length string is 4434 * defined to be "." 4435 */ 4436 if (len == 0) { 4437 len = 1; 4438 src = (uintptr_t)"."; 4439 } 4440 4441 /* 4442 * Start from the back of the string, moving back toward the 4443 * front until we see a character that isn't a slash. That 4444 * character is the last character in the basename. 4445 */ 4446 for (i = len - 1; i >= 0; i--) { 4447 if (dtrace_load8(src + i) != '/') 4448 break; 4449 } 4450 4451 if (i >= 0) 4452 lastbase = i; 4453 4454 /* 4455 * Starting from the last character in the basename, move 4456 * towards the front until we find a slash. The character 4457 * that we processed immediately before that is the first 4458 * character in the basename. 4459 */ 4460 for (; i >= 0; i--) { 4461 if (dtrace_load8(src + i) == '/') 4462 break; 4463 } 4464 4465 if (i >= 0) 4466 firstbase = i + 1; 4467 4468 /* 4469 * Now keep going until we find a non-slash character. That 4470 * character is the last character in the dirname. 4471 */ 4472 for (; i >= 0; i--) { 4473 if (dtrace_load8(src + i) != '/') 4474 break; 4475 } 4476 4477 if (i >= 0) 4478 lastdir = i; 4479 4480 ASSERT(!(lastbase == -1 && firstbase != -1)); 4481 ASSERT(!(firstbase == -1 && lastdir != -1)); 4482 4483 if (lastbase == -1) { 4484 /* 4485 * We didn't find a non-slash character. We know that 4486 * the length is non-zero, so the whole string must be 4487 * slashes. In either the dirname or the basename 4488 * case, we return '/'. 
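* (When the string does contain a non-slash character -- say the
* input is "/foo/bar//" -- the scans above instead leave
* firstbase/lastbase bracketing "bar" and lastdir on the final 'o'
* of "foo", so dirname() yields "/foo" and basename() yields "bar".)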
4489 */ 4490 ASSERT(firstbase == -1); 4491 firstbase = lastbase = lastdir = 0; 4492 } 4493 4494 if (firstbase == -1) { 4495 /* 4496 * The entire string consists only of a basename 4497 * component. If we're looking for dirname, we need 4498 * to change our string to be just "."; if we're 4499 * looking for a basename, we'll just set the first 4500 * character of the basename to be 0. 4501 */ 4502 if (subr == DIF_SUBR_DIRNAME) { 4503 ASSERT(lastdir == -1); 4504 src = (uintptr_t)"."; 4505 lastdir = 0; 4506 } else { 4507 firstbase = 0; 4508 } 4509 } 4510 4511 if (subr == DIF_SUBR_DIRNAME) { 4512 if (lastdir == -1) { 4513 /* 4514 * We know that we have a slash in the name -- 4515 * or lastdir would be set to 0, above. And 4516 * because lastdir is -1, we know that this 4517 * slash must be the first character. (That 4518 * is, the full string must be of the form 4519 * "/basename".) In this case, the last 4520 * character of the directory name is 0. 4521 */ 4522 lastdir = 0; 4523 } 4524 4525 start = 0; 4526 end = lastdir; 4527 } else { 4528 ASSERT(subr == DIF_SUBR_BASENAME); 4529 ASSERT(firstbase != -1 && lastbase != -1); 4530 start = firstbase; 4531 end = lastbase; 4532 } 4533 4534 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4535 dest[j] = dtrace_load8(src + i); 4536 4537 dest[j] = '\0'; 4538 regs[rd] = (uintptr_t)dest; 4539 mstate->dtms_scratch_ptr += size; 4540 break; 4541 } 4542 4543 case DIF_SUBR_CLEANPATH: { 4544 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4545 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4546 uintptr_t src = tupregs[0].dttk_value; 4547 int i = 0, j = 0; 4548 4549 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4550 regs[rd] = 0; 4551 break; 4552 } 4553 4554 if (!DTRACE_INSCRATCH(mstate, size)) { 4555 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4556 regs[rd] = 0; 4557 break; 4558 } 4559 4560 /* 4561 * Move forward, loading each character. 4562 */ 4563 do { 4564 c = dtrace_load8(src + i++); 4565 next: 4566 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4567 break; 4568 4569 if (c != '/') { 4570 dest[j++] = c; 4571 continue; 4572 } 4573 4574 c = dtrace_load8(src + i++); 4575 4576 if (c == '/') { 4577 /* 4578 * We have two slashes -- we can just advance 4579 * to the next character. 4580 */ 4581 goto next; 4582 } 4583 4584 if (c != '.') { 4585 /* 4586 * This is not "." and it's not ".." -- we can 4587 * just store the "/" and this character and 4588 * drive on. 4589 */ 4590 dest[j++] = '/'; 4591 dest[j++] = c; 4592 continue; 4593 } 4594 4595 c = dtrace_load8(src + i++); 4596 4597 if (c == '/') { 4598 /* 4599 * This is a "/./" component. We're not going 4600 * to store anything in the destination buffer; 4601 * we're just going to go to the next component. 4602 */ 4603 goto next; 4604 } 4605 4606 if (c != '.') { 4607 /* 4608 * This is not ".." -- we can just store the 4609 * "/." and this character and continue 4610 * processing. 4611 */ 4612 dest[j++] = '/'; 4613 dest[j++] = '.'; 4614 dest[j++] = c; 4615 continue; 4616 } 4617 4618 c = dtrace_load8(src + i++); 4619 4620 if (c != '/' && c != '\0') { 4621 /* 4622 * This is not ".." -- it's "..[mumble]". 4623 * We'll store the "/.." and this character 4624 * and continue processing. 4625 */ 4626 dest[j++] = '/'; 4627 dest[j++] = '.'; 4628 dest[j++] = '.'; 4629 dest[j++] = c; 4630 continue; 4631 } 4632 4633 /* 4634 * This is "/../" or "/..\0". We need to back up 4635 * our destination pointer until we find a "/". 
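* (Net effect of this loop, for illustration: an input like
* "/a/b/../c" comes out as "/a/c", and "//x/./y" comes out as
* "/x/y".)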
4636 */ 4637 i--; 4638 while (j != 0 && dest[--j] != '/') 4639 continue; 4640 4641 if (c == '\0') 4642 dest[++j] = '/'; 4643 } while (c != '\0'); 4644 4645 dest[j] = '\0'; 4646 regs[rd] = (uintptr_t)dest; 4647 mstate->dtms_scratch_ptr += size; 4648 break; 4649 } 4650 4651 case DIF_SUBR_INET_NTOA: 4652 case DIF_SUBR_INET_NTOA6: 4653 case DIF_SUBR_INET_NTOP: { 4654 size_t size; 4655 int af, argi, i; 4656 char *base, *end; 4657 4658 if (subr == DIF_SUBR_INET_NTOP) { 4659 af = (int)tupregs[0].dttk_value; 4660 argi = 1; 4661 } else { 4662 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4663 argi = 0; 4664 } 4665 4666 if (af == AF_INET) { 4667 ipaddr_t ip4; 4668 uint8_t *ptr8, val; 4669 4670 /* 4671 * Safely load the IPv4 address. 4672 */ 4673 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4674 4675 /* 4676 * Check an IPv4 string will fit in scratch. 4677 */ 4678 size = INET_ADDRSTRLEN; 4679 if (!DTRACE_INSCRATCH(mstate, size)) { 4680 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4681 regs[rd] = 0; 4682 break; 4683 } 4684 base = (char *)mstate->dtms_scratch_ptr; 4685 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4686 4687 /* 4688 * Stringify as a dotted decimal quad. 4689 */ 4690 *end-- = '\0'; 4691 ptr8 = (uint8_t *)&ip4; 4692 for (i = 3; i >= 0; i--) { 4693 val = ptr8[i]; 4694 4695 if (val == 0) { 4696 *end-- = '0'; 4697 } else { 4698 for (; val; val /= 10) { 4699 *end-- = '0' + (val % 10); 4700 } 4701 } 4702 4703 if (i > 0) 4704 *end-- = '.'; 4705 } 4706 ASSERT(end + 1 >= base); 4707 4708 } else if (af == AF_INET6) { 4709 struct in6_addr ip6; 4710 int firstzero, tryzero, numzero, v6end; 4711 uint16_t val; 4712 const char digits[] = "0123456789abcdef"; 4713 4714 /* 4715 * Stringify using RFC 1884 convention 2 - 16 bit 4716 * hexadecimal values with a zero-run compression. 4717 * Lower case hexadecimal digits are used. 4718 * eg, fe80::214:4fff:fe0b:76c8. 4719 * The IPv4 embedded form is returned for inet_ntop, 4720 * just the IPv4 string is returned for inet_ntoa6. 4721 */ 4722 4723 /* 4724 * Safely load the IPv6 address. 4725 */ 4726 dtrace_bcopy( 4727 (void *)(uintptr_t)tupregs[argi].dttk_value, 4728 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4729 4730 /* 4731 * Check an IPv6 string will fit in scratch. 4732 */ 4733 size = INET6_ADDRSTRLEN; 4734 if (!DTRACE_INSCRATCH(mstate, size)) { 4735 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4736 regs[rd] = 0; 4737 break; 4738 } 4739 base = (char *)mstate->dtms_scratch_ptr; 4740 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4741 *end-- = '\0'; 4742 4743 /* 4744 * Find the longest run of 16 bit zero values 4745 * for the single allowed zero compression - "::". 
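* (For example, given 2001:0:0:1:0:0:0:2, the second and longer run
* of zero groups is the one chosen, so it alone is rendered as "::".)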
4746 */ 4747 firstzero = -1; 4748 tryzero = -1; 4749 numzero = 1; 4750 for (i = 0; i < sizeof (struct in6_addr); i++) { 4751 #if defined(sun) 4752 if (ip6._S6_un._S6_u8[i] == 0 && 4753 #else 4754 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4755 #endif 4756 tryzero == -1 && i % 2 == 0) { 4757 tryzero = i; 4758 continue; 4759 } 4760 4761 if (tryzero != -1 && 4762 #if defined(sun) 4763 (ip6._S6_un._S6_u8[i] != 0 || 4764 #else 4765 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4766 #endif 4767 i == sizeof (struct in6_addr) - 1)) { 4768 4769 if (i - tryzero <= numzero) { 4770 tryzero = -1; 4771 continue; 4772 } 4773 4774 firstzero = tryzero; 4775 numzero = i - i % 2 - tryzero; 4776 tryzero = -1; 4777 4778 #if defined(sun) 4779 if (ip6._S6_un._S6_u8[i] == 0 && 4780 #else 4781 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4782 #endif 4783 i == sizeof (struct in6_addr) - 1) 4784 numzero += 2; 4785 } 4786 } 4787 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4788 4789 /* 4790 * Check for an IPv4 embedded address. 4791 */ 4792 v6end = sizeof (struct in6_addr) - 2; 4793 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4794 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4795 for (i = sizeof (struct in6_addr) - 1; 4796 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4797 ASSERT(end >= base); 4798 4799 #if defined(sun) 4800 val = ip6._S6_un._S6_u8[i]; 4801 #else 4802 val = ip6.__u6_addr.__u6_addr8[i]; 4803 #endif 4804 4805 if (val == 0) { 4806 *end-- = '0'; 4807 } else { 4808 for (; val; val /= 10) { 4809 *end-- = '0' + val % 10; 4810 } 4811 } 4812 4813 if (i > DTRACE_V4MAPPED_OFFSET) 4814 *end-- = '.'; 4815 } 4816 4817 if (subr == DIF_SUBR_INET_NTOA6) 4818 goto inetout; 4819 4820 /* 4821 * Set v6end to skip the IPv4 address that 4822 * we have already stringified. 4823 */ 4824 v6end = 10; 4825 } 4826 4827 /* 4828 * Build the IPv6 string by working through the 4829 * address in reverse. 4830 */ 4831 for (i = v6end; i >= 0; i -= 2) { 4832 ASSERT(end >= base); 4833 4834 if (i == firstzero + numzero - 2) { 4835 *end-- = ':'; 4836 *end-- = ':'; 4837 i -= numzero - 2; 4838 continue; 4839 } 4840 4841 if (i < 14 && i != firstzero - 2) 4842 *end-- = ':'; 4843 4844 #if defined(sun) 4845 val = (ip6._S6_un._S6_u8[i] << 8) + 4846 ip6._S6_un._S6_u8[i + 1]; 4847 #else 4848 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4849 ip6.__u6_addr.__u6_addr8[i + 1]; 4850 #endif 4851 4852 if (val == 0) { 4853 *end-- = '0'; 4854 } else { 4855 for (; val; val /= 16) { 4856 *end-- = digits[val % 16]; 4857 } 4858 } 4859 } 4860 ASSERT(end + 1 >= base); 4861 4862 } else { 4863 /* 4864 * The user didn't use AH_INET or AH_INET6. 
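* In other words, the address family handed to us was neither
* AF_INET nor AF_INET6, so we flag the operation as illegal.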
4865 */ 4866 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4867 regs[rd] = 0; 4868 break; 4869 } 4870 4871 inetout: regs[rd] = (uintptr_t)end + 1; 4872 mstate->dtms_scratch_ptr += size; 4873 break; 4874 } 4875 4876 case DIF_SUBR_MEMREF: { 4877 uintptr_t size = 2 * sizeof(uintptr_t); 4878 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4879 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4880 4881 /* address and length */ 4882 memref[0] = tupregs[0].dttk_value; 4883 memref[1] = tupregs[1].dttk_value; 4884 4885 regs[rd] = (uintptr_t) memref; 4886 mstate->dtms_scratch_ptr += scratch_size; 4887 break; 4888 } 4889 4890 case DIF_SUBR_TYPEREF: { 4891 uintptr_t size = 4 * sizeof(uintptr_t); 4892 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4893 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4894 4895 /* address, num_elements, type_str, type_len */ 4896 typeref[0] = tupregs[0].dttk_value; 4897 typeref[1] = tupregs[1].dttk_value; 4898 typeref[2] = tupregs[2].dttk_value; 4899 typeref[3] = tupregs[3].dttk_value; 4900 4901 regs[rd] = (uintptr_t) typeref; 4902 mstate->dtms_scratch_ptr += scratch_size; 4903 break; 4904 } 4905 } 4906 } 4907 4908 /* 4909 * Emulate the execution of DTrace IR instructions specified by the given 4910 * DIF object. This function is deliberately void of assertions as all of 4911 * the necessary checks are handled by a call to dtrace_difo_validate(). 4912 */ 4913 static uint64_t 4914 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4915 dtrace_vstate_t *vstate, dtrace_state_t *state) 4916 { 4917 const dif_instr_t *text = difo->dtdo_buf; 4918 const uint_t textlen = difo->dtdo_len; 4919 const char *strtab = difo->dtdo_strtab; 4920 const uint64_t *inttab = difo->dtdo_inttab; 4921 4922 uint64_t rval = 0; 4923 dtrace_statvar_t *svar; 4924 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4925 dtrace_difv_t *v; 4926 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4927 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4928 4929 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4930 uint64_t regs[DIF_DIR_NREGS]; 4931 uint64_t *tmp; 4932 4933 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4934 int64_t cc_r; 4935 uint_t pc = 0, id, opc = 0; 4936 uint8_t ttop = 0; 4937 dif_instr_t instr; 4938 uint_t r1, r2, rd; 4939 4940 /* 4941 * We stash the current DIF object into the machine state: we need it 4942 * for subsequent access checking. 
4943 */ 4944 mstate->dtms_difo = difo; 4945 4946 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4947 4948 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4949 opc = pc; 4950 4951 instr = text[pc++]; 4952 r1 = DIF_INSTR_R1(instr); 4953 r2 = DIF_INSTR_R2(instr); 4954 rd = DIF_INSTR_RD(instr); 4955 4956 switch (DIF_INSTR_OP(instr)) { 4957 case DIF_OP_OR: 4958 regs[rd] = regs[r1] | regs[r2]; 4959 break; 4960 case DIF_OP_XOR: 4961 regs[rd] = regs[r1] ^ regs[r2]; 4962 break; 4963 case DIF_OP_AND: 4964 regs[rd] = regs[r1] & regs[r2]; 4965 break; 4966 case DIF_OP_SLL: 4967 regs[rd] = regs[r1] << regs[r2]; 4968 break; 4969 case DIF_OP_SRL: 4970 regs[rd] = regs[r1] >> regs[r2]; 4971 break; 4972 case DIF_OP_SUB: 4973 regs[rd] = regs[r1] - regs[r2]; 4974 break; 4975 case DIF_OP_ADD: 4976 regs[rd] = regs[r1] + regs[r2]; 4977 break; 4978 case DIF_OP_MUL: 4979 regs[rd] = regs[r1] * regs[r2]; 4980 break; 4981 case DIF_OP_SDIV: 4982 if (regs[r2] == 0) { 4983 regs[rd] = 0; 4984 *flags |= CPU_DTRACE_DIVZERO; 4985 } else { 4986 regs[rd] = (int64_t)regs[r1] / 4987 (int64_t)regs[r2]; 4988 } 4989 break; 4990 4991 case DIF_OP_UDIV: 4992 if (regs[r2] == 0) { 4993 regs[rd] = 0; 4994 *flags |= CPU_DTRACE_DIVZERO; 4995 } else { 4996 regs[rd] = regs[r1] / regs[r2]; 4997 } 4998 break; 4999 5000 case DIF_OP_SREM: 5001 if (regs[r2] == 0) { 5002 regs[rd] = 0; 5003 *flags |= CPU_DTRACE_DIVZERO; 5004 } else { 5005 regs[rd] = (int64_t)regs[r1] % 5006 (int64_t)regs[r2]; 5007 } 5008 break; 5009 5010 case DIF_OP_UREM: 5011 if (regs[r2] == 0) { 5012 regs[rd] = 0; 5013 *flags |= CPU_DTRACE_DIVZERO; 5014 } else { 5015 regs[rd] = regs[r1] % regs[r2]; 5016 } 5017 break; 5018 5019 case DIF_OP_NOT: 5020 regs[rd] = ~regs[r1]; 5021 break; 5022 case DIF_OP_MOV: 5023 regs[rd] = regs[r1]; 5024 break; 5025 case DIF_OP_CMP: 5026 cc_r = regs[r1] - regs[r2]; 5027 cc_n = cc_r < 0; 5028 cc_z = cc_r == 0; 5029 cc_v = 0; 5030 cc_c = regs[r1] < regs[r2]; 5031 break; 5032 case DIF_OP_TST: 5033 cc_n = cc_v = cc_c = 0; 5034 cc_z = regs[r1] == 0; 5035 break; 5036 case DIF_OP_BA: 5037 pc = DIF_INSTR_LABEL(instr); 5038 break; 5039 case DIF_OP_BE: 5040 if (cc_z) 5041 pc = DIF_INSTR_LABEL(instr); 5042 break; 5043 case DIF_OP_BNE: 5044 if (cc_z == 0) 5045 pc = DIF_INSTR_LABEL(instr); 5046 break; 5047 case DIF_OP_BG: 5048 if ((cc_z | (cc_n ^ cc_v)) == 0) 5049 pc = DIF_INSTR_LABEL(instr); 5050 break; 5051 case DIF_OP_BGU: 5052 if ((cc_c | cc_z) == 0) 5053 pc = DIF_INSTR_LABEL(instr); 5054 break; 5055 case DIF_OP_BGE: 5056 if ((cc_n ^ cc_v) == 0) 5057 pc = DIF_INSTR_LABEL(instr); 5058 break; 5059 case DIF_OP_BGEU: 5060 if (cc_c == 0) 5061 pc = DIF_INSTR_LABEL(instr); 5062 break; 5063 case DIF_OP_BL: 5064 if (cc_n ^ cc_v) 5065 pc = DIF_INSTR_LABEL(instr); 5066 break; 5067 case DIF_OP_BLU: 5068 if (cc_c) 5069 pc = DIF_INSTR_LABEL(instr); 5070 break; 5071 case DIF_OP_BLE: 5072 if (cc_z | (cc_n ^ cc_v)) 5073 pc = DIF_INSTR_LABEL(instr); 5074 break; 5075 case DIF_OP_BLEU: 5076 if (cc_c | cc_z) 5077 pc = DIF_INSTR_LABEL(instr); 5078 break; 5079 case DIF_OP_RLDSB: 5080 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5081 *flags |= CPU_DTRACE_KPRIV; 5082 *illval = regs[r1]; 5083 break; 5084 } 5085 /*FALLTHROUGH*/ 5086 case DIF_OP_LDSB: 5087 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5088 break; 5089 case DIF_OP_RLDSH: 5090 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5091 *flags |= CPU_DTRACE_KPRIV; 5092 *illval = regs[r1]; 5093 break; 5094 } 5095 /*FALLTHROUGH*/ 5096 case DIF_OP_LDSH: 5097 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5098 break; 
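/*
 * (The remaining R-prefixed loads below follow the same pattern as
 * DIF_OP_RLDSB and DIF_OP_RLDSH above: vet the address with
 * dtrace_canstore() first, then fall through to the corresponding
 * unchecked load.)
 */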
5099 case DIF_OP_RLDSW: 5100 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5101 *flags |= CPU_DTRACE_KPRIV; 5102 *illval = regs[r1]; 5103 break; 5104 } 5105 /*FALLTHROUGH*/ 5106 case DIF_OP_LDSW: 5107 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5108 break; 5109 case DIF_OP_RLDUB: 5110 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5111 *flags |= CPU_DTRACE_KPRIV; 5112 *illval = regs[r1]; 5113 break; 5114 } 5115 /*FALLTHROUGH*/ 5116 case DIF_OP_LDUB: 5117 regs[rd] = dtrace_load8(regs[r1]); 5118 break; 5119 case DIF_OP_RLDUH: 5120 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5121 *flags |= CPU_DTRACE_KPRIV; 5122 *illval = regs[r1]; 5123 break; 5124 } 5125 /*FALLTHROUGH*/ 5126 case DIF_OP_LDUH: 5127 regs[rd] = dtrace_load16(regs[r1]); 5128 break; 5129 case DIF_OP_RLDUW: 5130 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5131 *flags |= CPU_DTRACE_KPRIV; 5132 *illval = regs[r1]; 5133 break; 5134 } 5135 /*FALLTHROUGH*/ 5136 case DIF_OP_LDUW: 5137 regs[rd] = dtrace_load32(regs[r1]); 5138 break; 5139 case DIF_OP_RLDX: 5140 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5141 *flags |= CPU_DTRACE_KPRIV; 5142 *illval = regs[r1]; 5143 break; 5144 } 5145 /*FALLTHROUGH*/ 5146 case DIF_OP_LDX: 5147 regs[rd] = dtrace_load64(regs[r1]); 5148 break; 5149 case DIF_OP_ULDSB: 5150 regs[rd] = (int8_t) 5151 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5152 break; 5153 case DIF_OP_ULDSH: 5154 regs[rd] = (int16_t) 5155 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5156 break; 5157 case DIF_OP_ULDSW: 5158 regs[rd] = (int32_t) 5159 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5160 break; 5161 case DIF_OP_ULDUB: 5162 regs[rd] = 5163 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5164 break; 5165 case DIF_OP_ULDUH: 5166 regs[rd] = 5167 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5168 break; 5169 case DIF_OP_ULDUW: 5170 regs[rd] = 5171 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5172 break; 5173 case DIF_OP_ULDX: 5174 regs[rd] = 5175 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5176 break; 5177 case DIF_OP_RET: 5178 rval = regs[rd]; 5179 pc = textlen; 5180 break; 5181 case DIF_OP_NOP: 5182 break; 5183 case DIF_OP_SETX: 5184 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5185 break; 5186 case DIF_OP_SETS: 5187 regs[rd] = (uint64_t)(uintptr_t) 5188 (strtab + DIF_INSTR_STRING(instr)); 5189 break; 5190 case DIF_OP_SCMP: { 5191 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5192 uintptr_t s1 = regs[r1]; 5193 uintptr_t s2 = regs[r2]; 5194 5195 if (s1 != 0 && 5196 !dtrace_strcanload(s1, sz, mstate, vstate)) 5197 break; 5198 if (s2 != 0 && 5199 !dtrace_strcanload(s2, sz, mstate, vstate)) 5200 break; 5201 5202 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5203 5204 cc_n = cc_r < 0; 5205 cc_z = cc_r == 0; 5206 cc_v = cc_c = 0; 5207 break; 5208 } 5209 case DIF_OP_LDGA: 5210 regs[rd] = dtrace_dif_variable(mstate, state, 5211 r1, regs[r2]); 5212 break; 5213 case DIF_OP_LDGS: 5214 id = DIF_INSTR_VAR(instr); 5215 5216 if (id >= DIF_VAR_OTHER_UBASE) { 5217 uintptr_t a; 5218 5219 id -= DIF_VAR_OTHER_UBASE; 5220 svar = vstate->dtvs_globals[id]; 5221 ASSERT(svar != NULL); 5222 v = &svar->dtsv_var; 5223 5224 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5225 regs[rd] = svar->dtsv_data; 5226 break; 5227 } 5228 5229 a = (uintptr_t)svar->dtsv_data; 5230 5231 if (*(uint8_t *)a == UINT8_MAX) { 5232 /* 5233 * If the 0th byte is set to UINT8_MAX 5234 * then this is to be treated as a 5235 * reference to a NULL variable. 
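* (The corresponding DIF_OP_STGS case below writes
* UINT8_MAX into this byte when NULL is stored into a
* by-ref global, so a later load of that variable yields
* 0 rather than a pointer into the stale buffer.)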
5236 */ 5237 regs[rd] = 0; 5238 } else { 5239 regs[rd] = a + sizeof (uint64_t); 5240 } 5241 5242 break; 5243 } 5244 5245 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5246 break; 5247 5248 case DIF_OP_STGS: 5249 id = DIF_INSTR_VAR(instr); 5250 5251 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5252 id -= DIF_VAR_OTHER_UBASE; 5253 5254 svar = vstate->dtvs_globals[id]; 5255 ASSERT(svar != NULL); 5256 v = &svar->dtsv_var; 5257 5258 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5259 uintptr_t a = (uintptr_t)svar->dtsv_data; 5260 5261 ASSERT(a != 0); 5262 ASSERT(svar->dtsv_size != 0); 5263 5264 if (regs[rd] == 0) { 5265 *(uint8_t *)a = UINT8_MAX; 5266 break; 5267 } else { 5268 *(uint8_t *)a = 0; 5269 a += sizeof (uint64_t); 5270 } 5271 if (!dtrace_vcanload( 5272 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5273 mstate, vstate)) 5274 break; 5275 5276 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5277 (void *)a, &v->dtdv_type); 5278 break; 5279 } 5280 5281 svar->dtsv_data = regs[rd]; 5282 break; 5283 5284 case DIF_OP_LDTA: 5285 /* 5286 * There are no DTrace built-in thread-local arrays at 5287 * present. This opcode is saved for future work. 5288 */ 5289 *flags |= CPU_DTRACE_ILLOP; 5290 regs[rd] = 0; 5291 break; 5292 5293 case DIF_OP_LDLS: 5294 id = DIF_INSTR_VAR(instr); 5295 5296 if (id < DIF_VAR_OTHER_UBASE) { 5297 /* 5298 * For now, this has no meaning. 5299 */ 5300 regs[rd] = 0; 5301 break; 5302 } 5303 5304 id -= DIF_VAR_OTHER_UBASE; 5305 5306 ASSERT(id < vstate->dtvs_nlocals); 5307 ASSERT(vstate->dtvs_locals != NULL); 5308 5309 svar = vstate->dtvs_locals[id]; 5310 ASSERT(svar != NULL); 5311 v = &svar->dtsv_var; 5312 5313 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5314 uintptr_t a = (uintptr_t)svar->dtsv_data; 5315 size_t sz = v->dtdv_type.dtdt_size; 5316 5317 sz += sizeof (uint64_t); 5318 ASSERT(svar->dtsv_size == NCPU * sz); 5319 a += curcpu * sz; 5320 5321 if (*(uint8_t *)a == UINT8_MAX) { 5322 /* 5323 * If the 0th byte is set to UINT8_MAX 5324 * then this is to be treated as a 5325 * reference to a NULL variable. 
5326 */ 5327 regs[rd] = 0; 5328 } else { 5329 regs[rd] = a + sizeof (uint64_t); 5330 } 5331 5332 break; 5333 } 5334 5335 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5336 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5337 regs[rd] = tmp[curcpu]; 5338 break; 5339 5340 case DIF_OP_STLS: 5341 id = DIF_INSTR_VAR(instr); 5342 5343 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5344 id -= DIF_VAR_OTHER_UBASE; 5345 ASSERT(id < vstate->dtvs_nlocals); 5346 5347 ASSERT(vstate->dtvs_locals != NULL); 5348 svar = vstate->dtvs_locals[id]; 5349 ASSERT(svar != NULL); 5350 v = &svar->dtsv_var; 5351 5352 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5353 uintptr_t a = (uintptr_t)svar->dtsv_data; 5354 size_t sz = v->dtdv_type.dtdt_size; 5355 5356 sz += sizeof (uint64_t); 5357 ASSERT(svar->dtsv_size == NCPU * sz); 5358 a += curcpu * sz; 5359 5360 if (regs[rd] == 0) { 5361 *(uint8_t *)a = UINT8_MAX; 5362 break; 5363 } else { 5364 *(uint8_t *)a = 0; 5365 a += sizeof (uint64_t); 5366 } 5367 5368 if (!dtrace_vcanload( 5369 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5370 mstate, vstate)) 5371 break; 5372 5373 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5374 (void *)a, &v->dtdv_type); 5375 break; 5376 } 5377 5378 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5379 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5380 tmp[curcpu] = regs[rd]; 5381 break; 5382 5383 case DIF_OP_LDTS: { 5384 dtrace_dynvar_t *dvar; 5385 dtrace_key_t *key; 5386 5387 id = DIF_INSTR_VAR(instr); 5388 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5389 id -= DIF_VAR_OTHER_UBASE; 5390 v = &vstate->dtvs_tlocals[id]; 5391 5392 key = &tupregs[DIF_DTR_NREGS]; 5393 key[0].dttk_value = (uint64_t)id; 5394 key[0].dttk_size = 0; 5395 DTRACE_TLS_THRKEY(key[1].dttk_value); 5396 key[1].dttk_size = 0; 5397 5398 dvar = dtrace_dynvar(dstate, 2, key, 5399 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5400 mstate, vstate); 5401 5402 if (dvar == NULL) { 5403 regs[rd] = 0; 5404 break; 5405 } 5406 5407 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5408 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5409 } else { 5410 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5411 } 5412 5413 break; 5414 } 5415 5416 case DIF_OP_STTS: { 5417 dtrace_dynvar_t *dvar; 5418 dtrace_key_t *key; 5419 5420 id = DIF_INSTR_VAR(instr); 5421 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5422 id -= DIF_VAR_OTHER_UBASE; 5423 5424 key = &tupregs[DIF_DTR_NREGS]; 5425 key[0].dttk_value = (uint64_t)id; 5426 key[0].dttk_size = 0; 5427 DTRACE_TLS_THRKEY(key[1].dttk_value); 5428 key[1].dttk_size = 0; 5429 v = &vstate->dtvs_tlocals[id]; 5430 5431 dvar = dtrace_dynvar(dstate, 2, key, 5432 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5433 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5434 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5435 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5436 5437 /* 5438 * Given that we're storing to thread-local data, 5439 * we need to flush our predicate cache. 
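* (A cached "predicate was false" result may have depended on
* the old value of a thread-local variable -- a predicate such
* as /self->traced/, for example -- so it can no longer be
* trusted once any thread-local has been stored to.)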
5440 */ 5441 curthread->t_predcache = 0; 5442 5443 if (dvar == NULL) 5444 break; 5445 5446 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5447 if (!dtrace_vcanload( 5448 (void *)(uintptr_t)regs[rd], 5449 &v->dtdv_type, mstate, vstate)) 5450 break; 5451 5452 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5453 dvar->dtdv_data, &v->dtdv_type); 5454 } else { 5455 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5456 } 5457 5458 break; 5459 } 5460 5461 case DIF_OP_SRA: 5462 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5463 break; 5464 5465 case DIF_OP_CALL: 5466 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5467 regs, tupregs, ttop, mstate, state); 5468 break; 5469 5470 case DIF_OP_PUSHTR: 5471 if (ttop == DIF_DTR_NREGS) { 5472 *flags |= CPU_DTRACE_TUPOFLOW; 5473 break; 5474 } 5475 5476 if (r1 == DIF_TYPE_STRING) { 5477 /* 5478 * If this is a string type and the size is 0, 5479 * we'll use the system-wide default string 5480 * size. Note that we are _not_ looking at 5481 * the value of the DTRACEOPT_STRSIZE option; 5482 * had this been set, we would expect to have 5483 * a non-zero size value in the "pushtr". 5484 */ 5485 tupregs[ttop].dttk_size = 5486 dtrace_strlen((char *)(uintptr_t)regs[rd], 5487 regs[r2] ? regs[r2] : 5488 dtrace_strsize_default) + 1; 5489 } else { 5490 tupregs[ttop].dttk_size = regs[r2]; 5491 } 5492 5493 tupregs[ttop++].dttk_value = regs[rd]; 5494 break; 5495 5496 case DIF_OP_PUSHTV: 5497 if (ttop == DIF_DTR_NREGS) { 5498 *flags |= CPU_DTRACE_TUPOFLOW; 5499 break; 5500 } 5501 5502 tupregs[ttop].dttk_value = regs[rd]; 5503 tupregs[ttop++].dttk_size = 0; 5504 break; 5505 5506 case DIF_OP_POPTS: 5507 if (ttop != 0) 5508 ttop--; 5509 break; 5510 5511 case DIF_OP_FLUSHTS: 5512 ttop = 0; 5513 break; 5514 5515 case DIF_OP_LDGAA: 5516 case DIF_OP_LDTAA: { 5517 dtrace_dynvar_t *dvar; 5518 dtrace_key_t *key = tupregs; 5519 uint_t nkeys = ttop; 5520 5521 id = DIF_INSTR_VAR(instr); 5522 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5523 id -= DIF_VAR_OTHER_UBASE; 5524 5525 key[nkeys].dttk_value = (uint64_t)id; 5526 key[nkeys++].dttk_size = 0; 5527 5528 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5529 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5530 key[nkeys++].dttk_size = 0; 5531 v = &vstate->dtvs_tlocals[id]; 5532 } else { 5533 v = &vstate->dtvs_globals[id]->dtsv_var; 5534 } 5535 5536 dvar = dtrace_dynvar(dstate, nkeys, key, 5537 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5538 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5539 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5540 5541 if (dvar == NULL) { 5542 regs[rd] = 0; 5543 break; 5544 } 5545 5546 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5547 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5548 } else { 5549 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5550 } 5551 5552 break; 5553 } 5554 5555 case DIF_OP_STGAA: 5556 case DIF_OP_STTAA: { 5557 dtrace_dynvar_t *dvar; 5558 dtrace_key_t *key = tupregs; 5559 uint_t nkeys = ttop; 5560 5561 id = DIF_INSTR_VAR(instr); 5562 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5563 id -= DIF_VAR_OTHER_UBASE; 5564 5565 key[nkeys].dttk_value = (uint64_t)id; 5566 key[nkeys++].dttk_size = 0; 5567 5568 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5569 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5570 key[nkeys++].dttk_size = 0; 5571 v = &vstate->dtvs_tlocals[id]; 5572 } else { 5573 v = &vstate->dtvs_globals[id]->dtsv_var; 5574 } 5575 5576 dvar = dtrace_dynvar(dstate, nkeys, key, 5577 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5578 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5579 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5580 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5581 5582 if (dvar == NULL) 5583 break; 5584 5585 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5586 if (!dtrace_vcanload( 5587 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5588 mstate, vstate)) 5589 break; 5590 5591 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5592 dvar->dtdv_data, &v->dtdv_type); 5593 } else { 5594 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5595 } 5596 5597 break; 5598 } 5599 5600 case DIF_OP_ALLOCS: { 5601 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5602 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5603 5604 /* 5605 * Rounding up the user allocation size could have 5606 * overflowed large, bogus allocations (like -1ULL) to 5607 * 0. 5608 */ 5609 if (size < regs[r1] || 5610 !DTRACE_INSCRATCH(mstate, size)) { 5611 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5612 regs[rd] = 0; 5613 break; 5614 } 5615 5616 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5617 mstate->dtms_scratch_ptr += size; 5618 regs[rd] = ptr; 5619 break; 5620 } 5621 5622 case DIF_OP_COPYS: 5623 if (!dtrace_canstore(regs[rd], regs[r2], 5624 mstate, vstate)) { 5625 *flags |= CPU_DTRACE_BADADDR; 5626 *illval = regs[rd]; 5627 break; 5628 } 5629 5630 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5631 break; 5632 5633 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5634 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5635 break; 5636 5637 case DIF_OP_STB: 5638 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5639 *flags |= CPU_DTRACE_BADADDR; 5640 *illval = regs[rd]; 5641 break; 5642 } 5643 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5644 break; 5645 5646 case DIF_OP_STH: 5647 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5648 *flags |= CPU_DTRACE_BADADDR; 5649 *illval = regs[rd]; 5650 break; 5651 } 5652 if (regs[rd] & 1) { 5653 *flags |= CPU_DTRACE_BADALIGN; 5654 *illval = regs[rd]; 5655 break; 5656 } 5657 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5658 break; 5659 5660 case DIF_OP_STW: 5661 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5662 *flags |= CPU_DTRACE_BADADDR; 5663 *illval = regs[rd]; 5664 break; 5665 } 5666 if (regs[rd] & 3) { 5667 *flags |= CPU_DTRACE_BADALIGN; 5668 *illval = regs[rd]; 5669 break; 5670 } 5671 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5672 break; 5673 5674 case DIF_OP_STX: 5675 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5676 *flags |= CPU_DTRACE_BADADDR; 5677 *illval = regs[rd]; 5678 break; 5679 } 5680 if (regs[rd] & 7) { 5681 *flags |= CPU_DTRACE_BADALIGN; 5682 *illval = regs[rd]; 5683 break; 5684 } 5685 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5686 break; 5687 } 5688 } 5689 5690 if (!(*flags & CPU_DTRACE_FAULT)) 5691 return (rval); 5692 5693 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5694 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5695 5696 return (0); 5697 } 5698 5699 static void 5700 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5701 { 5702 dtrace_probe_t *probe = ecb->dte_probe; 5703 dtrace_provider_t *prov = probe->dtpr_provider; 5704 char c[DTRACE_FULLNAMELEN + 80], *str; 5705 char *msg = "dtrace: breakpoint action at probe "; 5706 char *ecbmsg = " (ecb "; 5707 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5708 uintptr_t val = (uintptr_t)ecb; 5709 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5710 5711 if (dtrace_destructive_disallow) 5712 return; 5713 5714 /* 5715 * It's impossible to be taking action on the NULL probe. 
5716 */ 5717 ASSERT(probe != NULL); 5718 5719 /* 5720 * This is a poor man's (destitute man's?) sprintf(): we want to 5721 * print the provider name, module name, function name and name of 5722 * the probe, along with the hex address of the ECB with the breakpoint 5723 * action -- all of which we must place in the character buffer by 5724 * hand. 5725 */ 5726 while (*msg != '\0') 5727 c[i++] = *msg++; 5728 5729 for (str = prov->dtpv_name; *str != '\0'; str++) 5730 c[i++] = *str; 5731 c[i++] = ':'; 5732 5733 for (str = probe->dtpr_mod; *str != '\0'; str++) 5734 c[i++] = *str; 5735 c[i++] = ':'; 5736 5737 for (str = probe->dtpr_func; *str != '\0'; str++) 5738 c[i++] = *str; 5739 c[i++] = ':'; 5740 5741 for (str = probe->dtpr_name; *str != '\0'; str++) 5742 c[i++] = *str; 5743 5744 while (*ecbmsg != '\0') 5745 c[i++] = *ecbmsg++; 5746 5747 while (shift >= 0) { 5748 mask = (uintptr_t)0xf << shift; 5749 5750 if (val >= ((uintptr_t)1 << shift)) 5751 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5752 shift -= 4; 5753 } 5754 5755 c[i++] = ')'; 5756 c[i] = '\0'; 5757 5758 #if defined(sun) 5759 debug_enter(c); 5760 #else 5761 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5762 #endif 5763 } 5764 5765 static void 5766 dtrace_action_panic(dtrace_ecb_t *ecb) 5767 { 5768 dtrace_probe_t *probe = ecb->dte_probe; 5769 5770 /* 5771 * It's impossible to be taking action on the NULL probe. 5772 */ 5773 ASSERT(probe != NULL); 5774 5775 if (dtrace_destructive_disallow) 5776 return; 5777 5778 if (dtrace_panicked != NULL) 5779 return; 5780 5781 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5782 return; 5783 5784 /* 5785 * We won the right to panic. (We want to be sure that only one 5786 * thread calls panic() from dtrace_probe(), and that panic() is 5787 * called exactly once.) 5788 */ 5789 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5790 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5791 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5792 } 5793 5794 static void 5795 dtrace_action_raise(uint64_t sig) 5796 { 5797 if (dtrace_destructive_disallow) 5798 return; 5799 5800 if (sig >= NSIG) { 5801 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5802 return; 5803 } 5804 5805 #if defined(sun) 5806 /* 5807 * raise() has a queue depth of 1 -- we ignore all subsequent 5808 * invocations of the raise() action. 
5809 */ 5810 if (curthread->t_dtrace_sig == 0) 5811 curthread->t_dtrace_sig = (uint8_t)sig; 5812 5813 curthread->t_sig_check = 1; 5814 aston(curthread); 5815 #else 5816 struct proc *p = curproc; 5817 PROC_LOCK(p); 5818 kern_psignal(p, sig); 5819 PROC_UNLOCK(p); 5820 #endif 5821 } 5822 5823 static void 5824 dtrace_action_stop(void) 5825 { 5826 if (dtrace_destructive_disallow) 5827 return; 5828 5829 #if defined(sun) 5830 if (!curthread->t_dtrace_stop) { 5831 curthread->t_dtrace_stop = 1; 5832 curthread->t_sig_check = 1; 5833 aston(curthread); 5834 } 5835 #else 5836 struct proc *p = curproc; 5837 PROC_LOCK(p); 5838 kern_psignal(p, SIGSTOP); 5839 PROC_UNLOCK(p); 5840 #endif 5841 } 5842 5843 static void 5844 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5845 { 5846 hrtime_t now; 5847 volatile uint16_t *flags; 5848 #if defined(sun) 5849 cpu_t *cpu = CPU; 5850 #else 5851 cpu_t *cpu = &solaris_cpu[curcpu]; 5852 #endif 5853 5854 if (dtrace_destructive_disallow) 5855 return; 5856 5857 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5858 5859 now = dtrace_gethrtime(); 5860 5861 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5862 /* 5863 * We need to advance the mark to the current time. 5864 */ 5865 cpu->cpu_dtrace_chillmark = now; 5866 cpu->cpu_dtrace_chilled = 0; 5867 } 5868 5869 /* 5870 * Now check to see if the requested chill time would take us over 5871 * the maximum amount of time allowed in the chill interval. (Or 5872 * worse, if the calculation itself induces overflow.) 5873 */ 5874 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5875 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5876 *flags |= CPU_DTRACE_ILLOP; 5877 return; 5878 } 5879 5880 while (dtrace_gethrtime() - now < val) 5881 continue; 5882 5883 /* 5884 * Normally, we assure that the value of the variable "timestamp" does 5885 * not change within an ECB. The presence of chill() represents an 5886 * exception to this rule, however. 5887 */ 5888 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5889 cpu->cpu_dtrace_chilled += val; 5890 } 5891 5892 static void 5893 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5894 uint64_t *buf, uint64_t arg) 5895 { 5896 int nframes = DTRACE_USTACK_NFRAMES(arg); 5897 int strsize = DTRACE_USTACK_STRSIZE(arg); 5898 uint64_t *pcs = &buf[1], *fps; 5899 char *str = (char *)&pcs[nframes]; 5900 int size, offs = 0, i, j; 5901 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5902 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5903 char *sym; 5904 5905 /* 5906 * Should be taking a faster path if string space has not been 5907 * allocated. 5908 */ 5909 ASSERT(strsize != 0); 5910 5911 /* 5912 * We will first allocate some temporary space for the frame pointers. 5913 */ 5914 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5915 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5916 (nframes * sizeof (uint64_t)); 5917 5918 if (!DTRACE_INSCRATCH(mstate, size)) { 5919 /* 5920 * Not enough room for our frame pointers -- need to indicate 5921 * that we ran out of scratch space. 5922 */ 5923 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5924 return; 5925 } 5926 5927 mstate->dtms_scratch_ptr += size; 5928 saved = mstate->dtms_scratch_ptr; 5929 5930 /* 5931 * Now get a stack with both program counters and frame pointers. 
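* (Both arrays are needed because the USTACK helper below is
* invoked once per frame with a pc/fp pair drawn from these
* buffers.)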
5932 */ 5933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5934 dtrace_getufpstack(buf, fps, nframes + 1); 5935 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5936 5937 /* 5938 * If that faulted, we're cooked. 5939 */ 5940 if (*flags & CPU_DTRACE_FAULT) 5941 goto out; 5942 5943 /* 5944 * Now we want to walk up the stack, calling the USTACK helper. For 5945 * each iteration, we restore the scratch pointer. 5946 */ 5947 for (i = 0; i < nframes; i++) { 5948 mstate->dtms_scratch_ptr = saved; 5949 5950 if (offs >= strsize) 5951 break; 5952 5953 sym = (char *)(uintptr_t)dtrace_helper( 5954 DTRACE_HELPER_ACTION_USTACK, 5955 mstate, state, pcs[i], fps[i]); 5956 5957 /* 5958 * If we faulted while running the helper, we're going to 5959 * clear the fault and null out the corresponding string. 5960 */ 5961 if (*flags & CPU_DTRACE_FAULT) { 5962 *flags &= ~CPU_DTRACE_FAULT; 5963 str[offs++] = '\0'; 5964 continue; 5965 } 5966 5967 if (sym == NULL) { 5968 str[offs++] = '\0'; 5969 continue; 5970 } 5971 5972 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5973 5974 /* 5975 * Now copy in the string that the helper returned to us. 5976 */ 5977 for (j = 0; offs + j < strsize; j++) { 5978 if ((str[offs + j] = sym[j]) == '\0') 5979 break; 5980 } 5981 5982 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5983 5984 offs += j + 1; 5985 } 5986 5987 if (offs >= strsize) { 5988 /* 5989 * If we didn't have room for all of the strings, we don't 5990 * abort processing -- this needn't be a fatal error -- but we 5991 * still want to increment a counter (dts_stkstroverflows) to 5992 * allow this condition to be warned about. (If this is from 5993 * a jstack() action, it is easily tuned via jstackstrsize.) 5994 */ 5995 dtrace_error(&state->dts_stkstroverflows); 5996 } 5997 5998 while (offs < strsize) 5999 str[offs++] = '\0'; 6000 6001 out: 6002 mstate->dtms_scratch_ptr = old; 6003 } 6004 6005 /* 6006 * If you're looking for the epicenter of DTrace, you just found it. This 6007 * is the function called by the provider to fire a probe -- from which all 6008 * subsequent probe-context DTrace activity emanates. 6009 */ 6010 void 6011 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6012 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6013 { 6014 processorid_t cpuid; 6015 dtrace_icookie_t cookie; 6016 dtrace_probe_t *probe; 6017 dtrace_mstate_t mstate; 6018 dtrace_ecb_t *ecb; 6019 dtrace_action_t *act; 6020 intptr_t offs; 6021 size_t size; 6022 int vtime, onintr; 6023 volatile uint16_t *flags; 6024 hrtime_t now; 6025 6026 if (panicstr != NULL) 6027 return; 6028 6029 #if defined(sun) 6030 /* 6031 * Kick out immediately if this CPU is still being born (in which case 6032 * curthread will be set to -1) or the current thread can't allow 6033 * probes in its current context. 6034 */ 6035 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6036 return; 6037 #endif 6038 6039 cookie = dtrace_interrupt_disable(); 6040 probe = dtrace_probes[id - 1]; 6041 cpuid = curcpu; 6042 onintr = CPU_ON_INTR(CPU); 6043 6044 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6045 probe->dtpr_predcache == curthread->t_predcache) { 6046 /* 6047 * We have hit in the predicate cache; we know that 6048 * this predicate would evaluate to be false. 6049 */ 6050 dtrace_interrupt_enable(cookie); 6051 return; 6052 } 6053 6054 #if defined(sun) 6055 if (panic_quiesce) { 6056 #else 6057 if (panicstr != NULL) { 6058 #endif 6059 /* 6060 * We don't trace anything if we're panicking. 
6061 */ 6062 dtrace_interrupt_enable(cookie); 6063 return; 6064 } 6065 6066 now = dtrace_gethrtime(); 6067 vtime = dtrace_vtime_references != 0; 6068 6069 if (vtime && curthread->t_dtrace_start) 6070 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6071 6072 mstate.dtms_difo = NULL; 6073 mstate.dtms_probe = probe; 6074 mstate.dtms_strtok = 0; 6075 mstate.dtms_arg[0] = arg0; 6076 mstate.dtms_arg[1] = arg1; 6077 mstate.dtms_arg[2] = arg2; 6078 mstate.dtms_arg[3] = arg3; 6079 mstate.dtms_arg[4] = arg4; 6080 6081 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6082 6083 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6084 dtrace_predicate_t *pred = ecb->dte_predicate; 6085 dtrace_state_t *state = ecb->dte_state; 6086 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6087 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6088 dtrace_vstate_t *vstate = &state->dts_vstate; 6089 dtrace_provider_t *prov = probe->dtpr_provider; 6090 uint64_t tracememsize = 0; 6091 int committed = 0; 6092 caddr_t tomax; 6093 6094 /* 6095 * A little subtlety with the following (seemingly innocuous) 6096 * declaration of the automatic 'val': by looking at the 6097 * code, you might think that it could be declared in the 6098 * action processing loop, below. (That is, it's only used in 6099 * the action processing loop.) However, it must be declared 6100 * out of that scope because in the case of DIF expression 6101 * arguments to aggregating actions, one iteration of the 6102 * action loop will use the last iteration's value. 6103 */ 6104 uint64_t val = 0; 6105 6106 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6107 *flags &= ~CPU_DTRACE_ERROR; 6108 6109 if (prov == dtrace_provider) { 6110 /* 6111 * If dtrace itself is the provider of this probe, 6112 * we're only going to continue processing the ECB if 6113 * arg0 (the dtrace_state_t) is equal to the ECB's 6114 * creating state. (This prevents disjoint consumers 6115 * from seeing one another's metaprobes.) 6116 */ 6117 if (arg0 != (uint64_t)(uintptr_t)state) 6118 continue; 6119 } 6120 6121 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6122 /* 6123 * We're not currently active. If our provider isn't 6124 * the dtrace pseudo provider, we're not interested. 6125 */ 6126 if (prov != dtrace_provider) 6127 continue; 6128 6129 /* 6130 * Now we must further check if we are in the BEGIN 6131 * probe. If we are, we will only continue processing 6132 * if we're still in WARMUP -- if one BEGIN enabling 6133 * has invoked the exit() action, we don't want to 6134 * evaluate subsequent BEGIN enablings. 6135 */ 6136 if (probe->dtpr_id == dtrace_probeid_begin && 6137 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6138 ASSERT(state->dts_activity == 6139 DTRACE_ACTIVITY_DRAINING); 6140 continue; 6141 } 6142 } 6143 6144 if (ecb->dte_cond) { 6145 /* 6146 * If the dte_cond bits indicate that this 6147 * consumer is only allowed to see user-mode firings 6148 * of this probe, call the provider's dtps_usermode() 6149 * entry point to check that the probe was fired 6150 * while in a user context. Skip this ECB if that's 6151 * not the case. 6152 */ 6153 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6154 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6155 probe->dtpr_id, probe->dtpr_arg) == 0) 6156 continue; 6157 6158 #if defined(sun) 6159 /* 6160 * This is more subtle than it looks. 
We have to be 6161 * absolutely certain that CRED() isn't going to 6162 * change out from under us so it's only legit to 6163 * examine that structure if we're in constrained 6164 * situations. Currently, the only times we'll do this 6165 * check is if a non-super-user has enabled the 6166 * profile or syscall providers -- providers that 6167 * allow visibility of all processes. For the 6168 * profile case, the check above will ensure that 6169 * we're examining a user context. 6170 */ 6171 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6172 cred_t *cr; 6173 cred_t *s_cr = 6174 ecb->dte_state->dts_cred.dcr_cred; 6175 proc_t *proc; 6176 6177 ASSERT(s_cr != NULL); 6178 6179 if ((cr = CRED()) == NULL || 6180 s_cr->cr_uid != cr->cr_uid || 6181 s_cr->cr_uid != cr->cr_ruid || 6182 s_cr->cr_uid != cr->cr_suid || 6183 s_cr->cr_gid != cr->cr_gid || 6184 s_cr->cr_gid != cr->cr_rgid || 6185 s_cr->cr_gid != cr->cr_sgid || 6186 (proc = ttoproc(curthread)) == NULL || 6187 (proc->p_flag & SNOCD)) 6188 continue; 6189 } 6190 6191 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6192 cred_t *cr; 6193 cred_t *s_cr = 6194 ecb->dte_state->dts_cred.dcr_cred; 6195 6196 ASSERT(s_cr != NULL); 6197 6198 if ((cr = CRED()) == NULL || 6199 s_cr->cr_zone->zone_id != 6200 cr->cr_zone->zone_id) 6201 continue; 6202 } 6203 #endif 6204 } 6205 6206 if (now - state->dts_alive > dtrace_deadman_timeout) { 6207 /* 6208 * We seem to be dead. Unless we (a) have kernel 6209 * destructive permissions, (b) have explicitly enabled 6210 * destructive actions, and (c) destructive actions have 6211 * not been disabled, we're going to transition into 6212 * the KILLED state, from which no further processing 6213 * on this state will be performed. 6214 */ 6215 if (!dtrace_priv_kernel_destructive(state) || 6216 !state->dts_cred.dcr_destructive || 6217 dtrace_destructive_disallow) { 6218 void *activity = &state->dts_activity; 6219 dtrace_activity_t current; 6220 6221 do { 6222 current = state->dts_activity; 6223 } while (dtrace_cas32(activity, current, 6224 DTRACE_ACTIVITY_KILLED) != current); 6225 6226 continue; 6227 } 6228 } 6229 6230 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6231 ecb->dte_alignment, state, &mstate)) < 0) 6232 continue; 6233 6234 tomax = buf->dtb_tomax; 6235 ASSERT(tomax != NULL); 6236 6237 if (ecb->dte_size != 0) 6238 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6239 6240 mstate.dtms_epid = ecb->dte_epid; 6241 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6242 6243 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6244 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6245 else 6246 mstate.dtms_access = 0; 6247 6248 if (pred != NULL) { 6249 dtrace_difo_t *dp = pred->dtp_difo; 6250 int rval; 6251 6252 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6253 6254 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6255 dtrace_cacheid_t cid = probe->dtpr_predcache; 6256 6257 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6258 /* 6259 * Update the predicate cache...
6260 */ 6261 ASSERT(cid == pred->dtp_cacheid); 6262 curthread->t_predcache = cid; 6263 } 6264 6265 continue; 6266 } 6267 } 6268 6269 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6270 act != NULL; act = act->dta_next) { 6271 size_t valoffs; 6272 dtrace_difo_t *dp; 6273 dtrace_recdesc_t *rec = &act->dta_rec; 6274 6275 size = rec->dtrd_size; 6276 valoffs = offs + rec->dtrd_offset; 6277 6278 if (DTRACEACT_ISAGG(act->dta_kind)) { 6279 uint64_t v = 0xbad; 6280 dtrace_aggregation_t *agg; 6281 6282 agg = (dtrace_aggregation_t *)act; 6283 6284 if ((dp = act->dta_difo) != NULL) 6285 v = dtrace_dif_emulate(dp, 6286 &mstate, vstate, state); 6287 6288 if (*flags & CPU_DTRACE_ERROR) 6289 continue; 6290 6291 /* 6292 * Note that we always pass the expression 6293 * value from the previous iteration of the 6294 * action loop. This value will only be used 6295 * if there is an expression argument to the 6296 * aggregating action, denoted by the 6297 * dtag_hasarg field. 6298 */ 6299 dtrace_aggregate(agg, buf, 6300 offs, aggbuf, v, val); 6301 continue; 6302 } 6303 6304 switch (act->dta_kind) { 6305 case DTRACEACT_STOP: 6306 if (dtrace_priv_proc_destructive(state)) 6307 dtrace_action_stop(); 6308 continue; 6309 6310 case DTRACEACT_BREAKPOINT: 6311 if (dtrace_priv_kernel_destructive(state)) 6312 dtrace_action_breakpoint(ecb); 6313 continue; 6314 6315 case DTRACEACT_PANIC: 6316 if (dtrace_priv_kernel_destructive(state)) 6317 dtrace_action_panic(ecb); 6318 continue; 6319 6320 case DTRACEACT_STACK: 6321 if (!dtrace_priv_kernel(state)) 6322 continue; 6323 6324 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6325 size / sizeof (pc_t), probe->dtpr_aframes, 6326 DTRACE_ANCHORED(probe) ? NULL : 6327 (uint32_t *)arg0); 6328 continue; 6329 6330 case DTRACEACT_JSTACK: 6331 case DTRACEACT_USTACK: 6332 if (!dtrace_priv_proc(state)) 6333 continue; 6334 6335 /* 6336 * See comment in DIF_VAR_PID. 6337 */ 6338 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6339 CPU_ON_INTR(CPU)) { 6340 int depth = DTRACE_USTACK_NFRAMES( 6341 rec->dtrd_arg) + 1; 6342 6343 dtrace_bzero((void *)(tomax + valoffs), 6344 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6345 + depth * sizeof (uint64_t)); 6346 6347 continue; 6348 } 6349 6350 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6351 curproc->p_dtrace_helpers != NULL) { 6352 /* 6353 * This is the slow path -- we have 6354 * allocated string space, and we're 6355 * getting the stack of a process that 6356 * has helpers. Call into a separate 6357 * routine to perform this processing. 
6358 */ 6359 dtrace_action_ustack(&mstate, state, 6360 (uint64_t *)(tomax + valoffs), 6361 rec->dtrd_arg); 6362 continue; 6363 } 6364 6365 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6366 dtrace_getupcstack((uint64_t *) 6367 (tomax + valoffs), 6368 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6369 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6370 continue; 6371 6372 default: 6373 break; 6374 } 6375 6376 dp = act->dta_difo; 6377 ASSERT(dp != NULL); 6378 6379 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6380 6381 if (*flags & CPU_DTRACE_ERROR) 6382 continue; 6383 6384 switch (act->dta_kind) { 6385 case DTRACEACT_SPECULATE: 6386 ASSERT(buf == &state->dts_buffer[cpuid]); 6387 buf = dtrace_speculation_buffer(state, 6388 cpuid, val); 6389 6390 if (buf == NULL) { 6391 *flags |= CPU_DTRACE_DROP; 6392 continue; 6393 } 6394 6395 offs = dtrace_buffer_reserve(buf, 6396 ecb->dte_needed, ecb->dte_alignment, 6397 state, NULL); 6398 6399 if (offs < 0) { 6400 *flags |= CPU_DTRACE_DROP; 6401 continue; 6402 } 6403 6404 tomax = buf->dtb_tomax; 6405 ASSERT(tomax != NULL); 6406 6407 if (ecb->dte_size != 0) 6408 DTRACE_STORE(uint32_t, tomax, offs, 6409 ecb->dte_epid); 6410 continue; 6411 6412 case DTRACEACT_PRINTM: { 6413 /* The DIF returns a 'memref'. */ 6414 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6415 6416 /* Get the size from the memref. */ 6417 size = memref[1]; 6418 6419 /* 6420 * Check if the size exceeds the allocated 6421 * buffer size. 6422 */ 6423 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6424 /* Flag a drop! */ 6425 *flags |= CPU_DTRACE_DROP; 6426 continue; 6427 } 6428 6429 /* Store the size in the buffer first. */ 6430 DTRACE_STORE(uintptr_t, tomax, 6431 valoffs, size); 6432 6433 /* 6434 * Offset the buffer address to the start 6435 * of the data. 6436 */ 6437 valoffs += sizeof(uintptr_t); 6438 6439 /* 6440 * Reset to the memory address rather than 6441 * the memref array, then let the BYREF 6442 * code below do the work to store the 6443 * memory data in the buffer. 6444 */ 6445 val = memref[0]; 6446 break; 6447 } 6448 6449 case DTRACEACT_PRINTT: { 6450 /* The DIF returns a 'typeref'. */ 6451 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6452 char c = '\0' + 1; 6453 size_t s; 6454 6455 /* 6456 * Get the type string length and round it 6457 * up so that the data that follows is 6458 * aligned for easy access. 6459 */ 6460 size_t typs = strlen((char *) typeref[2]) + 1; 6461 typs = roundup(typs, sizeof(uintptr_t)); 6462 6463 /* 6464 * Get the size from the typeref using the 6465 * number of elements and the type size. 6466 */ 6467 size = typeref[1] * typeref[3]; 6468 6469 /* 6470 * Check if the size exceeds the allocated 6471 * buffer size. 6472 */ 6473 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6474 /* Flag a drop! */ 6475 *flags |= CPU_DTRACE_DROP; 6476 continue; 6477 } 6478 6479 /* Store the size in the buffer first. */ 6480 DTRACE_STORE(uintptr_t, tomax, 6481 valoffs, size); 6482 valoffs += sizeof(uintptr_t); 6483 6484 /* Store the type size in the buffer. */ 6485 DTRACE_STORE(uintptr_t, tomax, 6486 valoffs, typeref[3]); 6487 valoffs += sizeof(uintptr_t); 6488 6489 val = typeref[2]; 6490 6491 for (s = 0; s < typs; s++) { 6492 if (c != '\0') 6493 c = dtrace_load8(val++); 6494 6495 DTRACE_STORE(uint8_t, tomax, 6496 valoffs++, c); 6497 } 6498 6499 /* 6500 * Reset to the memory address rather than 6501 * the typeref array, then let the BYREF 6502 * code below do the work to store the 6503 * memory data in the buffer.
6504 */ 6505 val = typeref[0]; 6506 break; 6507 } 6508 6509 case DTRACEACT_CHILL: 6510 if (dtrace_priv_kernel_destructive(state)) 6511 dtrace_action_chill(&mstate, val); 6512 continue; 6513 6514 case DTRACEACT_RAISE: 6515 if (dtrace_priv_proc_destructive(state)) 6516 dtrace_action_raise(val); 6517 continue; 6518 6519 case DTRACEACT_COMMIT: 6520 ASSERT(!committed); 6521 6522 /* 6523 * We need to commit our buffer state. 6524 */ 6525 if (ecb->dte_size) 6526 buf->dtb_offset = offs + ecb->dte_size; 6527 buf = &state->dts_buffer[cpuid]; 6528 dtrace_speculation_commit(state, cpuid, val); 6529 committed = 1; 6530 continue; 6531 6532 case DTRACEACT_DISCARD: 6533 dtrace_speculation_discard(state, cpuid, val); 6534 continue; 6535 6536 case DTRACEACT_DIFEXPR: 6537 case DTRACEACT_LIBACT: 6538 case DTRACEACT_PRINTF: 6539 case DTRACEACT_PRINTA: 6540 case DTRACEACT_SYSTEM: 6541 case DTRACEACT_FREOPEN: 6542 case DTRACEACT_TRACEMEM: 6543 break; 6544 6545 case DTRACEACT_TRACEMEM_DYNSIZE: 6546 tracememsize = val; 6547 break; 6548 6549 case DTRACEACT_SYM: 6550 case DTRACEACT_MOD: 6551 if (!dtrace_priv_kernel(state)) 6552 continue; 6553 break; 6554 6555 case DTRACEACT_USYM: 6556 case DTRACEACT_UMOD: 6557 case DTRACEACT_UADDR: { 6558 #if defined(sun) 6559 struct pid *pid = curthread->t_procp->p_pidp; 6560 #endif 6561 6562 if (!dtrace_priv_proc(state)) 6563 continue; 6564 6565 DTRACE_STORE(uint64_t, tomax, 6566 #if defined(sun) 6567 valoffs, (uint64_t)pid->pid_id); 6568 #else 6569 valoffs, (uint64_t) curproc->p_pid); 6570 #endif 6571 DTRACE_STORE(uint64_t, tomax, 6572 valoffs + sizeof (uint64_t), val); 6573 6574 continue; 6575 } 6576 6577 case DTRACEACT_EXIT: { 6578 /* 6579 * For the exit action, we are going to attempt 6580 * to atomically set our activity to be 6581 * draining. If this fails (either because 6582 * another CPU has beat us to the exit action, 6583 * or because our current activity is something 6584 * other than ACTIVE or WARMUP), we will 6585 * continue. This assures that the exit action 6586 * can be successfully recorded at most once 6587 * when we're in the ACTIVE state. If we're 6588 * encountering the exit() action while in 6589 * COOLDOWN, however, we want to honor the new 6590 * status code. (We know that we're the only 6591 * thread in COOLDOWN, so there is no race.) 6592 */ 6593 void *activity = &state->dts_activity; 6594 dtrace_activity_t current = state->dts_activity; 6595 6596 if (current == DTRACE_ACTIVITY_COOLDOWN) 6597 break; 6598 6599 if (current != DTRACE_ACTIVITY_WARMUP) 6600 current = DTRACE_ACTIVITY_ACTIVE; 6601 6602 if (dtrace_cas32(activity, current, 6603 DTRACE_ACTIVITY_DRAINING) != current) { 6604 *flags |= CPU_DTRACE_DROP; 6605 continue; 6606 } 6607 6608 break; 6609 } 6610 6611 default: 6612 ASSERT(0); 6613 } 6614 6615 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6616 uintptr_t end = valoffs + size; 6617 6618 if (tracememsize != 0 && 6619 valoffs + tracememsize < end) { 6620 end = valoffs + tracememsize; 6621 tracememsize = 0; 6622 } 6623 6624 if (!dtrace_vcanload((void *)(uintptr_t)val, 6625 &dp->dtdo_rtype, &mstate, vstate)) 6626 continue; 6627 6628 /* 6629 * If this is a string, we're going to only 6630 * load until we find the zero byte -- after 6631 * which we'll store zero bytes. 
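 * (For instance, copying the string "abc" into an 8-byte string
 * record stores 'a', 'b', 'c' and then five zero bytes: once the
 * terminator has been loaded, the loop below keeps storing '\0'
 * without loading further -- unless the record is part of a tuple,
 * in which case it stops at the terminator.)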
6632 */ 6633 if (dp->dtdo_rtype.dtdt_kind == 6634 DIF_TYPE_STRING) { 6635 char c = '\0' + 1; 6636 int intuple = act->dta_intuple; 6637 size_t s; 6638 6639 for (s = 0; s < size; s++) { 6640 if (c != '\0') 6641 c = dtrace_load8(val++); 6642 6643 DTRACE_STORE(uint8_t, tomax, 6644 valoffs++, c); 6645 6646 if (c == '\0' && intuple) 6647 break; 6648 } 6649 6650 continue; 6651 } 6652 6653 while (valoffs < end) { 6654 DTRACE_STORE(uint8_t, tomax, valoffs++, 6655 dtrace_load8(val++)); 6656 } 6657 6658 continue; 6659 } 6660 6661 switch (size) { 6662 case 0: 6663 break; 6664 6665 case sizeof (uint8_t): 6666 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6667 break; 6668 case sizeof (uint16_t): 6669 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6670 break; 6671 case sizeof (uint32_t): 6672 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6673 break; 6674 case sizeof (uint64_t): 6675 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6676 break; 6677 default: 6678 /* 6679 * Any other size should have been returned by 6680 * reference, not by value. 6681 */ 6682 ASSERT(0); 6683 break; 6684 } 6685 } 6686 6687 if (*flags & CPU_DTRACE_DROP) 6688 continue; 6689 6690 if (*flags & CPU_DTRACE_FAULT) { 6691 int ndx; 6692 dtrace_action_t *err; 6693 6694 buf->dtb_errors++; 6695 6696 if (probe->dtpr_id == dtrace_probeid_error) { 6697 /* 6698 * There's nothing we can do -- we had an 6699 * error on the error probe. We bump an 6700 * error counter to at least indicate that 6701 * this condition happened. 6702 */ 6703 dtrace_error(&state->dts_dblerrors); 6704 continue; 6705 } 6706 6707 if (vtime) { 6708 /* 6709 * Before recursing on dtrace_probe(), we 6710 * need to explicitly clear out our start 6711 * time to prevent it from being accumulated 6712 * into t_dtrace_vtime. 6713 */ 6714 curthread->t_dtrace_start = 0; 6715 } 6716 6717 /* 6718 * Iterate over the actions to figure out which action 6719 * we were processing when we experienced the error. 6720 * Note that act points _past_ the faulting action; if 6721 * act is ecb->dte_action, the fault was in the 6722 * predicate, if it's ecb->dte_action->dta_next it's 6723 * in action #1, and so on. 6724 */ 6725 for (err = ecb->dte_action, ndx = 0; 6726 err != act; err = err->dta_next, ndx++) 6727 continue; 6728 6729 dtrace_probe_error(state, ecb->dte_epid, ndx, 6730 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6731 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6732 cpu_core[cpuid].cpuc_dtrace_illval); 6733 6734 continue; 6735 } 6736 6737 if (!committed) 6738 buf->dtb_offset = offs + ecb->dte_size; 6739 } 6740 6741 if (vtime) 6742 curthread->t_dtrace_start = dtrace_gethrtime(); 6743 6744 dtrace_interrupt_enable(cookie); 6745 } 6746 6747 /* 6748 * DTrace Probe Hashing Functions 6749 * 6750 * The functions in this section (and indeed, the functions in remaining 6751 * sections) are not _called_ from probe context. (Any exceptions to this are 6752 * marked with a "Note:".) Rather, they are called from elsewhere in the 6753 * DTrace framework to look-up probes in, add probes to and remove probes from 6754 * the DTrace probe hashes. (Each probe is hashed by each element of the 6755 * probe tuple -- allowing for fast lookups, regardless of what was 6756 * specified.) 
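 * For example, a probe whose module is (say) "genunix", whose
 * function is "kmem_alloc" and whose name is "entry" lands in
 * dtrace_bymod under "genunix", in dtrace_byfunc under "kmem_alloc"
 * and in dtrace_byname under "entry"; a lookup that specifies only
 * one element of the tuple can therefore still go straight to a
 * short hash chain.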
6757 */ 6758 static uint_t 6759 dtrace_hash_str(const char *p) 6760 { 6761 unsigned int g; 6762 uint_t hval = 0; 6763 6764 while (*p) { 6765 hval = (hval << 4) + *p++; 6766 if ((g = (hval & 0xf0000000)) != 0) 6767 hval ^= g >> 24; 6768 hval &= ~g; 6769 } 6770 return (hval); 6771 } 6772 6773 static dtrace_hash_t * 6774 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6775 { 6776 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6777 6778 hash->dth_stroffs = stroffs; 6779 hash->dth_nextoffs = nextoffs; 6780 hash->dth_prevoffs = prevoffs; 6781 6782 hash->dth_size = 1; 6783 hash->dth_mask = hash->dth_size - 1; 6784 6785 hash->dth_tab = kmem_zalloc(hash->dth_size * 6786 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6787 6788 return (hash); 6789 } 6790 6791 static void 6792 dtrace_hash_destroy(dtrace_hash_t *hash) 6793 { 6794 #ifdef DEBUG 6795 int i; 6796 6797 for (i = 0; i < hash->dth_size; i++) 6798 ASSERT(hash->dth_tab[i] == NULL); 6799 #endif 6800 6801 kmem_free(hash->dth_tab, 6802 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6803 kmem_free(hash, sizeof (dtrace_hash_t)); 6804 } 6805 6806 static void 6807 dtrace_hash_resize(dtrace_hash_t *hash) 6808 { 6809 int size = hash->dth_size, i, ndx; 6810 int new_size = hash->dth_size << 1; 6811 int new_mask = new_size - 1; 6812 dtrace_hashbucket_t **new_tab, *bucket, *next; 6813 6814 ASSERT((new_size & new_mask) == 0); 6815 6816 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6817 6818 for (i = 0; i < size; i++) { 6819 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6820 dtrace_probe_t *probe = bucket->dthb_chain; 6821 6822 ASSERT(probe != NULL); 6823 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6824 6825 next = bucket->dthb_next; 6826 bucket->dthb_next = new_tab[ndx]; 6827 new_tab[ndx] = bucket; 6828 } 6829 } 6830 6831 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6832 hash->dth_tab = new_tab; 6833 hash->dth_size = new_size; 6834 hash->dth_mask = new_mask; 6835 } 6836 6837 static void 6838 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6839 { 6840 int hashval = DTRACE_HASHSTR(hash, new); 6841 int ndx = hashval & hash->dth_mask; 6842 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6843 dtrace_probe_t **nextp, **prevp; 6844 6845 for (; bucket != NULL; bucket = bucket->dthb_next) { 6846 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6847 goto add; 6848 } 6849 6850 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6851 dtrace_hash_resize(hash); 6852 dtrace_hash_add(hash, new); 6853 return; 6854 } 6855 6856 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6857 bucket->dthb_next = hash->dth_tab[ndx]; 6858 hash->dth_tab[ndx] = bucket; 6859 hash->dth_nbuckets++; 6860 6861 add: 6862 nextp = DTRACE_HASHNEXT(hash, new); 6863 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6864 *nextp = bucket->dthb_chain; 6865 6866 if (bucket->dthb_chain != NULL) { 6867 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6868 ASSERT(*prevp == NULL); 6869 *prevp = new; 6870 } 6871 6872 bucket->dthb_chain = new; 6873 bucket->dthb_len++; 6874 } 6875 6876 static dtrace_probe_t * 6877 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6878 { 6879 int hashval = DTRACE_HASHSTR(hash, template); 6880 int ndx = hashval & hash->dth_mask; 6881 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6882 6883 for (; bucket != NULL; bucket = bucket->dthb_next) { 6884 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6885 return 
(bucket->dthb_chain); 6886 } 6887 6888 return (NULL); 6889 } 6890 6891 static int 6892 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6893 { 6894 int hashval = DTRACE_HASHSTR(hash, template); 6895 int ndx = hashval & hash->dth_mask; 6896 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6897 6898 for (; bucket != NULL; bucket = bucket->dthb_next) { 6899 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6900 return (bucket->dthb_len); 6901 } 6902 6903 return (0); 6904 } 6905 6906 static void 6907 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6908 { 6909 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6910 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6911 6912 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6913 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6914 6915 /* 6916 * Find the bucket that we're removing this probe from. 6917 */ 6918 for (; bucket != NULL; bucket = bucket->dthb_next) { 6919 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6920 break; 6921 } 6922 6923 ASSERT(bucket != NULL); 6924 6925 if (*prevp == NULL) { 6926 if (*nextp == NULL) { 6927 /* 6928 * The removed probe was the only probe on this 6929 * bucket; we need to remove the bucket. 6930 */ 6931 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6932 6933 ASSERT(bucket->dthb_chain == probe); 6934 ASSERT(b != NULL); 6935 6936 if (b == bucket) { 6937 hash->dth_tab[ndx] = bucket->dthb_next; 6938 } else { 6939 while (b->dthb_next != bucket) 6940 b = b->dthb_next; 6941 b->dthb_next = bucket->dthb_next; 6942 } 6943 6944 ASSERT(hash->dth_nbuckets > 0); 6945 hash->dth_nbuckets--; 6946 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6947 return; 6948 } 6949 6950 bucket->dthb_chain = *nextp; 6951 } else { 6952 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6953 } 6954 6955 if (*nextp != NULL) 6956 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6957 } 6958 6959 /* 6960 * DTrace Utility Functions 6961 * 6962 * These are random utility functions that are _not_ called from probe context. 6963 */ 6964 static int 6965 dtrace_badattr(const dtrace_attribute_t *a) 6966 { 6967 return (a->dtat_name > DTRACE_STABILITY_MAX || 6968 a->dtat_data > DTRACE_STABILITY_MAX || 6969 a->dtat_class > DTRACE_CLASS_MAX); 6970 } 6971 6972 /* 6973 * Return a duplicate copy of a string. If the specified string is NULL, 6974 * this function returns a zero-length string. 6975 */ 6976 static char * 6977 dtrace_strdup(const char *str) 6978 { 6979 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6980 6981 if (str != NULL) 6982 (void) strcpy(new, str); 6983 6984 return (new); 6985 } 6986 6987 #define DTRACE_ISALPHA(c) \ 6988 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6989 6990 static int 6991 dtrace_badname(const char *s) 6992 { 6993 char c; 6994 6995 if (s == NULL || (c = *s++) == '\0') 6996 return (0); 6997 6998 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6999 return (1); 7000 7001 while ((c = *s++) != '\0') { 7002 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7003 c != '-' && c != '_' && c != '.' && c != '`') 7004 return (1); 7005 } 7006 7007 return (0); 7008 } 7009 7010 static void 7011 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7012 { 7013 uint32_t priv; 7014 7015 #if defined(sun) 7016 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7017 /* 7018 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
7019 */ 7020 priv = DTRACE_PRIV_ALL; 7021 } else { 7022 *uidp = crgetuid(cr); 7023 *zoneidp = crgetzoneid(cr); 7024 7025 priv = 0; 7026 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7027 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7028 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7029 priv |= DTRACE_PRIV_USER; 7030 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7031 priv |= DTRACE_PRIV_PROC; 7032 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7033 priv |= DTRACE_PRIV_OWNER; 7034 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7035 priv |= DTRACE_PRIV_ZONEOWNER; 7036 } 7037 #else 7038 priv = DTRACE_PRIV_ALL; 7039 #endif 7040 7041 *privp = priv; 7042 } 7043 7044 #ifdef DTRACE_ERRDEBUG 7045 static void 7046 dtrace_errdebug(const char *str) 7047 { 7048 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7049 int occupied = 0; 7050 7051 mutex_enter(&dtrace_errlock); 7052 dtrace_errlast = str; 7053 dtrace_errthread = curthread; 7054 7055 while (occupied++ < DTRACE_ERRHASHSZ) { 7056 if (dtrace_errhash[hval].dter_msg == str) { 7057 dtrace_errhash[hval].dter_count++; 7058 goto out; 7059 } 7060 7061 if (dtrace_errhash[hval].dter_msg != NULL) { 7062 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7063 continue; 7064 } 7065 7066 dtrace_errhash[hval].dter_msg = str; 7067 dtrace_errhash[hval].dter_count = 1; 7068 goto out; 7069 } 7070 7071 panic("dtrace: undersized error hash"); 7072 out: 7073 mutex_exit(&dtrace_errlock); 7074 } 7075 #endif 7076 7077 /* 7078 * DTrace Matching Functions 7079 * 7080 * These functions are used to match groups of probes, given some elements of 7081 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7082 */ 7083 static int 7084 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7085 zoneid_t zoneid) 7086 { 7087 if (priv != DTRACE_PRIV_ALL) { 7088 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7089 uint32_t match = priv & ppriv; 7090 7091 /* 7092 * No PRIV_DTRACE_* privileges... 7093 */ 7094 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7095 DTRACE_PRIV_KERNEL)) == 0) 7096 return (0); 7097 7098 /* 7099 * No matching bits, but there were bits to match... 7100 */ 7101 if (match == 0 && ppriv != 0) 7102 return (0); 7103 7104 /* 7105 * Need to have permissions to the process, but don't... 7106 */ 7107 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7108 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7109 return (0); 7110 } 7111 7112 /* 7113 * Need to be in the same zone unless we possess the 7114 * privilege to examine all zones. 7115 */ 7116 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7117 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7118 return (0); 7119 } 7120 } 7121 7122 return (1); 7123 } 7124 7125 /* 7126 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7127 * consists of input pattern strings and an ops-vector to evaluate them. 7128 * This function returns >0 for match, 0 for no match, and <0 for error. 
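 * (The only error case in practice is excessive recursion in
 * dtrace_match_glob(), below, which returns -1 once the '*' matching
 * depth exceeds DTRACE_PROBEKEY_MAXDEPTH.)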
7129 */ 7130 static int 7131 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7132 uint32_t priv, uid_t uid, zoneid_t zoneid) 7133 { 7134 dtrace_provider_t *pvp = prp->dtpr_provider; 7135 int rv; 7136 7137 if (pvp->dtpv_defunct) 7138 return (0); 7139 7140 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7141 return (rv); 7142 7143 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7144 return (rv); 7145 7146 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7147 return (rv); 7148 7149 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7150 return (rv); 7151 7152 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7153 return (0); 7154 7155 return (rv); 7156 } 7157 7158 /* 7159 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7160 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7161 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7162 * In addition, all of the recursion cases except for '*' matching have been 7163 * unwound. For '*', we still implement recursive evaluation, but a depth 7164 * counter is maintained and matching is aborted if we recurse too deep. 7165 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7166 */ 7167 static int 7168 dtrace_match_glob(const char *s, const char *p, int depth) 7169 { 7170 const char *olds; 7171 char s1, c; 7172 int gs; 7173 7174 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7175 return (-1); 7176 7177 if (s == NULL) 7178 s = ""; /* treat NULL as empty string */ 7179 7180 top: 7181 olds = s; 7182 s1 = *s++; 7183 7184 if (p == NULL) 7185 return (0); 7186 7187 if ((c = *p++) == '\0') 7188 return (s1 == '\0'); 7189 7190 switch (c) { 7191 case '[': { 7192 int ok = 0, notflag = 0; 7193 char lc = '\0'; 7194 7195 if (s1 == '\0') 7196 return (0); 7197 7198 if (*p == '!') { 7199 notflag = 1; 7200 p++; 7201 } 7202 7203 if ((c = *p++) == '\0') 7204 return (0); 7205 7206 do { 7207 if (c == '-' && lc != '\0' && *p != ']') { 7208 if ((c = *p++) == '\0') 7209 return (0); 7210 if (c == '\\' && (c = *p++) == '\0') 7211 return (0); 7212 7213 if (notflag) { 7214 if (s1 < lc || s1 > c) 7215 ok++; 7216 else 7217 return (0); 7218 } else if (lc <= s1 && s1 <= c) 7219 ok++; 7220 7221 } else if (c == '\\' && (c = *p++) == '\0') 7222 return (0); 7223 7224 lc = c; /* save left-hand 'c' for next iteration */ 7225 7226 if (notflag) { 7227 if (s1 != c) 7228 ok++; 7229 else 7230 return (0); 7231 } else if (s1 == c) 7232 ok++; 7233 7234 if ((c = *p++) == '\0') 7235 return (0); 7236 7237 } while (c != ']'); 7238 7239 if (ok) 7240 goto top; 7241 7242 return (0); 7243 } 7244 7245 case '\\': 7246 if ((c = *p++) == '\0') 7247 return (0); 7248 /*FALLTHRU*/ 7249 7250 default: 7251 if (c != s1) 7252 return (0); 7253 /*FALLTHRU*/ 7254 7255 case '?': 7256 if (s1 != '\0') 7257 goto top; 7258 return (0); 7259 7260 case '*': 7261 while (*p == '*') 7262 p++; /* consecutive *'s are identical to a single one */ 7263 7264 if (*p == '\0') 7265 return (1); 7266 7267 for (s = olds; *s != '\0'; s++) { 7268 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7269 return (gs); 7270 } 7271 7272 return (0); 7273 } 7274 } 7275 7276 /*ARGSUSED*/ 7277 static int 7278 dtrace_match_string(const char *s, const char *p, int depth) 7279 { 7280 return (s != NULL && strcmp(s, p) == 0); 7281 } 7282 7283 /*ARGSUSED*/ 7284 static int 7285 dtrace_match_nul(const char *s, const char *p, int depth) 7286 { 7287 return (1); /* 
always match the empty pattern */ 7288 } 7289 7290 /*ARGSUSED*/ 7291 static int 7292 dtrace_match_nonzero(const char *s, const char *p, int depth) 7293 { 7294 return (s != NULL && s[0] != '\0'); 7295 } 7296 7297 static int 7298 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7299 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7300 { 7301 dtrace_probe_t template, *probe; 7302 dtrace_hash_t *hash = NULL; 7303 int len, best = INT_MAX, nmatched = 0; 7304 dtrace_id_t i; 7305 7306 ASSERT(MUTEX_HELD(&dtrace_lock)); 7307 7308 /* 7309 * If the probe ID is specified in the key, just lookup by ID and 7310 * invoke the match callback once if a matching probe is found. 7311 */ 7312 if (pkp->dtpk_id != DTRACE_IDNONE) { 7313 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7314 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7315 (void) (*matched)(probe, arg); 7316 nmatched++; 7317 } 7318 return (nmatched); 7319 } 7320 7321 template.dtpr_mod = (char *)pkp->dtpk_mod; 7322 template.dtpr_func = (char *)pkp->dtpk_func; 7323 template.dtpr_name = (char *)pkp->dtpk_name; 7324 7325 /* 7326 * We want to find the most distinct of the module name, function 7327 * name, and name. So for each one that is not a glob pattern or 7328 * empty string, we perform a lookup in the corresponding hash and 7329 * use the hash table with the fewest collisions to do our search. 7330 */ 7331 if (pkp->dtpk_mmatch == &dtrace_match_string && 7332 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7333 best = len; 7334 hash = dtrace_bymod; 7335 } 7336 7337 if (pkp->dtpk_fmatch == &dtrace_match_string && 7338 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7339 best = len; 7340 hash = dtrace_byfunc; 7341 } 7342 7343 if (pkp->dtpk_nmatch == &dtrace_match_string && 7344 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7345 best = len; 7346 hash = dtrace_byname; 7347 } 7348 7349 /* 7350 * If we did not select a hash table, iterate over every probe and 7351 * invoke our callback for each one that matches our input probe key. 7352 */ 7353 if (hash == NULL) { 7354 for (i = 0; i < dtrace_nprobes; i++) { 7355 if ((probe = dtrace_probes[i]) == NULL || 7356 dtrace_match_probe(probe, pkp, priv, uid, 7357 zoneid) <= 0) 7358 continue; 7359 7360 nmatched++; 7361 7362 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7363 break; 7364 } 7365 7366 return (nmatched); 7367 } 7368 7369 /* 7370 * If we selected a hash table, iterate over each probe of the same key 7371 * name and invoke the callback for every probe that matches the other 7372 * attributes of our input probe key. 7373 */ 7374 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7375 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7376 7377 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7378 continue; 7379 7380 nmatched++; 7381 7382 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7383 break; 7384 } 7385 7386 return (nmatched); 7387 } 7388 7389 /* 7390 * Return the function pointer dtrace_probecmp() should use to compare the 7391 * specified pattern with a string. For NULL or empty patterns, we select 7392 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7393 * For non-empty non-glob strings, we use dtrace_match_string(). 
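 * For example, a pattern of "fbt" selects dtrace_match_string(), a
 * pattern of "fbt*" (or anything containing '[', '?', '*' or '\')
 * selects dtrace_match_glob(), and a NULL or empty pattern selects
 * dtrace_match_nul().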
7394 */ 7395 static dtrace_probekey_f * 7396 dtrace_probekey_func(const char *p) 7397 { 7398 char c; 7399 7400 if (p == NULL || *p == '\0') 7401 return (&dtrace_match_nul); 7402 7403 while ((c = *p++) != '\0') { 7404 if (c == '[' || c == '?' || c == '*' || c == '\\') 7405 return (&dtrace_match_glob); 7406 } 7407 7408 return (&dtrace_match_string); 7409 } 7410 7411 /* 7412 * Build a probe comparison key for use with dtrace_match_probe() from the 7413 * given probe description. By convention, a null key only matches anchored 7414 * probes: if each field is the empty string, reset dtpk_fmatch to 7415 * dtrace_match_nonzero(). 7416 */ 7417 static void 7418 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7419 { 7420 pkp->dtpk_prov = pdp->dtpd_provider; 7421 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7422 7423 pkp->dtpk_mod = pdp->dtpd_mod; 7424 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7425 7426 pkp->dtpk_func = pdp->dtpd_func; 7427 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7428 7429 pkp->dtpk_name = pdp->dtpd_name; 7430 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7431 7432 pkp->dtpk_id = pdp->dtpd_id; 7433 7434 if (pkp->dtpk_id == DTRACE_IDNONE && 7435 pkp->dtpk_pmatch == &dtrace_match_nul && 7436 pkp->dtpk_mmatch == &dtrace_match_nul && 7437 pkp->dtpk_fmatch == &dtrace_match_nul && 7438 pkp->dtpk_nmatch == &dtrace_match_nul) 7439 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7440 } 7441 7442 /* 7443 * DTrace Provider-to-Framework API Functions 7444 * 7445 * These functions implement much of the Provider-to-Framework API, as 7446 * described in <sys/dtrace.h>. The parts of the API not in this section are 7447 * the functions in the API for probe management (found below), and 7448 * dtrace_probe() itself (found above). 7449 */ 7450 7451 /* 7452 * Register the calling provider with the DTrace framework. This should 7453 * generally be called by DTrace providers in their attach(9E) entry point. 7454 */ 7455 int 7456 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7457 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7458 { 7459 dtrace_provider_t *provider; 7460 7461 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7462 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7463 "arguments", name ? 
name : "<NULL>"); 7464 return (EINVAL); 7465 } 7466 7467 if (name[0] == '\0' || dtrace_badname(name)) { 7468 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7469 "provider name", name); 7470 return (EINVAL); 7471 } 7472 7473 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7474 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7475 pops->dtps_destroy == NULL || 7476 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7477 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7478 "provider ops", name); 7479 return (EINVAL); 7480 } 7481 7482 if (dtrace_badattr(&pap->dtpa_provider) || 7483 dtrace_badattr(&pap->dtpa_mod) || 7484 dtrace_badattr(&pap->dtpa_func) || 7485 dtrace_badattr(&pap->dtpa_name) || 7486 dtrace_badattr(&pap->dtpa_args)) { 7487 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7488 "provider attributes", name); 7489 return (EINVAL); 7490 } 7491 7492 if (priv & ~DTRACE_PRIV_ALL) { 7493 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7494 "privilege attributes", name); 7495 return (EINVAL); 7496 } 7497 7498 if ((priv & DTRACE_PRIV_KERNEL) && 7499 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7500 pops->dtps_usermode == NULL) { 7501 cmn_err(CE_WARN, "failed to register provider '%s': need " 7502 "dtps_usermode() op for given privilege attributes", name); 7503 return (EINVAL); 7504 } 7505 7506 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7507 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7508 (void) strcpy(provider->dtpv_name, name); 7509 7510 provider->dtpv_attr = *pap; 7511 provider->dtpv_priv.dtpp_flags = priv; 7512 if (cr != NULL) { 7513 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7514 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7515 } 7516 provider->dtpv_pops = *pops; 7517 7518 if (pops->dtps_provide == NULL) { 7519 ASSERT(pops->dtps_provide_module != NULL); 7520 provider->dtpv_pops.dtps_provide = 7521 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7522 } 7523 7524 if (pops->dtps_provide_module == NULL) { 7525 ASSERT(pops->dtps_provide != NULL); 7526 provider->dtpv_pops.dtps_provide_module = 7527 (void (*)(void *, modctl_t *))dtrace_nullop; 7528 } 7529 7530 if (pops->dtps_suspend == NULL) { 7531 ASSERT(pops->dtps_resume == NULL); 7532 provider->dtpv_pops.dtps_suspend = 7533 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7534 provider->dtpv_pops.dtps_resume = 7535 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7536 } 7537 7538 provider->dtpv_arg = arg; 7539 *idp = (dtrace_provider_id_t)provider; 7540 7541 if (pops == &dtrace_provider_ops) { 7542 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7543 ASSERT(MUTEX_HELD(&dtrace_lock)); 7544 ASSERT(dtrace_anon.dta_enabling == NULL); 7545 7546 /* 7547 * We make sure that the DTrace provider is at the head of 7548 * the provider chain. 7549 */ 7550 provider->dtpv_next = dtrace_provider; 7551 dtrace_provider = provider; 7552 return (0); 7553 } 7554 7555 mutex_enter(&dtrace_provider_lock); 7556 mutex_enter(&dtrace_lock); 7557 7558 /* 7559 * If there is at least one provider registered, we'll add this 7560 * provider after the first provider. 
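 * (The first provider is always the dtrace provider itself -- see
 * above -- so inserting after it preserves its place at the head of
 * the chain.)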
7561 */ 7562 if (dtrace_provider != NULL) { 7563 provider->dtpv_next = dtrace_provider->dtpv_next; 7564 dtrace_provider->dtpv_next = provider; 7565 } else { 7566 dtrace_provider = provider; 7567 } 7568 7569 if (dtrace_retained != NULL) { 7570 dtrace_enabling_provide(provider); 7571 7572 /* 7573 * Now we need to call dtrace_enabling_matchall() -- which 7574 * will acquire cpu_lock and dtrace_lock. We therefore need 7575 * to drop all of our locks before calling into it... 7576 */ 7577 mutex_exit(&dtrace_lock); 7578 mutex_exit(&dtrace_provider_lock); 7579 dtrace_enabling_matchall(); 7580 7581 return (0); 7582 } 7583 7584 mutex_exit(&dtrace_lock); 7585 mutex_exit(&dtrace_provider_lock); 7586 7587 return (0); 7588 } 7589 7590 /* 7591 * Unregister the specified provider from the DTrace framework. This should 7592 * generally be called by DTrace providers in their detach(9E) entry point. 7593 */ 7594 int 7595 dtrace_unregister(dtrace_provider_id_t id) 7596 { 7597 dtrace_provider_t *old = (dtrace_provider_t *)id; 7598 dtrace_provider_t *prev = NULL; 7599 int i, self = 0, noreap = 0; 7600 dtrace_probe_t *probe, *first = NULL; 7601 7602 if (old->dtpv_pops.dtps_enable == 7603 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7604 /* 7605 * If DTrace itself is the provider, we're called with locks 7606 * already held. 7607 */ 7608 ASSERT(old == dtrace_provider); 7609 #if defined(sun) 7610 ASSERT(dtrace_devi != NULL); 7611 #endif 7612 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7613 ASSERT(MUTEX_HELD(&dtrace_lock)); 7614 self = 1; 7615 7616 if (dtrace_provider->dtpv_next != NULL) { 7617 /* 7618 * There's another provider here; return failure. 7619 */ 7620 return (EBUSY); 7621 } 7622 } else { 7623 mutex_enter(&dtrace_provider_lock); 7624 mutex_enter(&mod_lock); 7625 mutex_enter(&dtrace_lock); 7626 } 7627 7628 /* 7629 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7630 * probes, we refuse to let providers slither away, unless this 7631 * provider has already been explicitly invalidated. 7632 */ 7633 if (!old->dtpv_defunct && 7634 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7635 dtrace_anon.dta_state->dts_necbs > 0))) { 7636 if (!self) { 7637 mutex_exit(&dtrace_lock); 7638 mutex_exit(&mod_lock); 7639 mutex_exit(&dtrace_provider_lock); 7640 } 7641 return (EBUSY); 7642 } 7643 7644 /* 7645 * Attempt to destroy the probes associated with this provider. 7646 */ 7647 for (i = 0; i < dtrace_nprobes; i++) { 7648 if ((probe = dtrace_probes[i]) == NULL) 7649 continue; 7650 7651 if (probe->dtpr_provider != old) 7652 continue; 7653 7654 if (probe->dtpr_ecb == NULL) 7655 continue; 7656 7657 /* 7658 * If we are trying to unregister a defunct provider, and the 7659 * provider was made defunct within the interval dictated by 7660 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7661 * attempt to reap our enablings. To denote that the provider 7662 * should reattempt to unregister itself at some point in the 7663 * future, we will return a differentiable error code (EAGAIN 7664 * instead of EBUSY) in this case. 
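 * (A provider that sees EAGAIN can therefore simply retry
 * dtrace_unregister() later -- for example, from a subsequent detach
 * attempt -- once the reaping has had a chance to run.)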
7665 */ 7666 if (dtrace_gethrtime() - old->dtpv_defunct > 7667 dtrace_unregister_defunct_reap) 7668 noreap = 1; 7669 7670 if (!self) { 7671 mutex_exit(&dtrace_lock); 7672 mutex_exit(&mod_lock); 7673 mutex_exit(&dtrace_provider_lock); 7674 } 7675 7676 if (noreap) 7677 return (EBUSY); 7678 7679 (void) taskq_dispatch(dtrace_taskq, 7680 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7681 7682 return (EAGAIN); 7683 } 7684 7685 /* 7686 * All of the probes for this provider are disabled; we can safely 7687 * remove all of them from their hash chains and from the probe array. 7688 */ 7689 for (i = 0; i < dtrace_nprobes; i++) { 7690 if ((probe = dtrace_probes[i]) == NULL) 7691 continue; 7692 7693 if (probe->dtpr_provider != old) 7694 continue; 7695 7696 dtrace_probes[i] = NULL; 7697 7698 dtrace_hash_remove(dtrace_bymod, probe); 7699 dtrace_hash_remove(dtrace_byfunc, probe); 7700 dtrace_hash_remove(dtrace_byname, probe); 7701 7702 if (first == NULL) { 7703 first = probe; 7704 probe->dtpr_nextmod = NULL; 7705 } else { 7706 probe->dtpr_nextmod = first; 7707 first = probe; 7708 } 7709 } 7710 7711 /* 7712 * The provider's probes have been removed from the hash chains and 7713 * from the probe array. Now issue a dtrace_sync() to be sure that 7714 * everyone has cleared out from any probe array processing. 7715 */ 7716 dtrace_sync(); 7717 7718 for (probe = first; probe != NULL; probe = first) { 7719 first = probe->dtpr_nextmod; 7720 7721 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7722 probe->dtpr_arg); 7723 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7724 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7725 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7726 #if defined(sun) 7727 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7728 #else 7729 free_unr(dtrace_arena, probe->dtpr_id); 7730 #endif 7731 kmem_free(probe, sizeof (dtrace_probe_t)); 7732 } 7733 7734 if ((prev = dtrace_provider) == old) { 7735 #if defined(sun) 7736 ASSERT(self || dtrace_devi == NULL); 7737 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7738 #endif 7739 dtrace_provider = old->dtpv_next; 7740 } else { 7741 while (prev != NULL && prev->dtpv_next != old) 7742 prev = prev->dtpv_next; 7743 7744 if (prev == NULL) { 7745 panic("attempt to unregister non-existent " 7746 "dtrace provider %p\n", (void *)id); 7747 } 7748 7749 prev->dtpv_next = old->dtpv_next; 7750 } 7751 7752 if (!self) { 7753 mutex_exit(&dtrace_lock); 7754 mutex_exit(&mod_lock); 7755 mutex_exit(&dtrace_provider_lock); 7756 } 7757 7758 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7759 kmem_free(old, sizeof (dtrace_provider_t)); 7760 7761 return (0); 7762 } 7763 7764 /* 7765 * Invalidate the specified provider. All subsequent probe lookups for the 7766 * specified provider will fail, but its probes will not be removed. 7767 */ 7768 void 7769 dtrace_invalidate(dtrace_provider_id_t id) 7770 { 7771 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7772 7773 ASSERT(pvp->dtpv_pops.dtps_enable != 7774 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7775 7776 mutex_enter(&dtrace_provider_lock); 7777 mutex_enter(&dtrace_lock); 7778 7779 pvp->dtpv_defunct = dtrace_gethrtime(); 7780 7781 mutex_exit(&dtrace_lock); 7782 mutex_exit(&dtrace_provider_lock); 7783 } 7784 7785 /* 7786 * Indicate whether or not DTrace has attached. 7787 */ 7788 int 7789 dtrace_attached(void) 7790 { 7791 /* 7792 * dtrace_provider will be non-NULL iff the DTrace driver has 7793 * attached. 
(It's non-NULL because DTrace is always itself a 7794 * provider.) 7795 */ 7796 return (dtrace_provider != NULL); 7797 } 7798 7799 /* 7800 * Remove all the unenabled probes for the given provider. This function is 7801 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7802 * -- just as many of its associated probes as it can. 7803 */ 7804 int 7805 dtrace_condense(dtrace_provider_id_t id) 7806 { 7807 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7808 int i; 7809 dtrace_probe_t *probe; 7810 7811 /* 7812 * Make sure this isn't the dtrace provider itself. 7813 */ 7814 ASSERT(prov->dtpv_pops.dtps_enable != 7815 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7816 7817 mutex_enter(&dtrace_provider_lock); 7818 mutex_enter(&dtrace_lock); 7819 7820 /* 7821 * Attempt to destroy the probes associated with this provider. 7822 */ 7823 for (i = 0; i < dtrace_nprobes; i++) { 7824 if ((probe = dtrace_probes[i]) == NULL) 7825 continue; 7826 7827 if (probe->dtpr_provider != prov) 7828 continue; 7829 7830 if (probe->dtpr_ecb != NULL) 7831 continue; 7832 7833 dtrace_probes[i] = NULL; 7834 7835 dtrace_hash_remove(dtrace_bymod, probe); 7836 dtrace_hash_remove(dtrace_byfunc, probe); 7837 dtrace_hash_remove(dtrace_byname, probe); 7838 7839 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7840 probe->dtpr_arg); 7841 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7842 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7843 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7844 kmem_free(probe, sizeof (dtrace_probe_t)); 7845 #if defined(sun) 7846 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7847 #else 7848 free_unr(dtrace_arena, i + 1); 7849 #endif 7850 } 7851 7852 mutex_exit(&dtrace_lock); 7853 mutex_exit(&dtrace_provider_lock); 7854 7855 return (0); 7856 } 7857 7858 /* 7859 * DTrace Probe Management Functions 7860 * 7861 * The functions in this section perform the DTrace probe management, 7862 * including functions to create probes, look-up probes, and call into the 7863 * providers to request that probes be provided. Some of these functions are 7864 * in the Provider-to-Framework API; these functions can be identified by the 7865 * fact that they are not declared "static". 7866 */ 7867 7868 /* 7869 * Create a probe with the specified module name, function name, and name. 
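 * An illustrative (hypothetical) call from a provider's dtps_provide()
 * entry point might look like:
 *
 *	id = dtrace_probe_create(my_provider_id, "my_module",
 *	    "my_function", "entry", 0, my_probe_arg);
 *
 * where the last two arguments are the number of artificial frames
 * and the probe argument later returned by dtrace_probe_arg().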
7870 */ 7871 dtrace_id_t 7872 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7873 const char *func, const char *name, int aframes, void *arg) 7874 { 7875 dtrace_probe_t *probe, **probes; 7876 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7877 dtrace_id_t id; 7878 7879 if (provider == dtrace_provider) { 7880 ASSERT(MUTEX_HELD(&dtrace_lock)); 7881 } else { 7882 mutex_enter(&dtrace_lock); 7883 } 7884 7885 #if defined(sun) 7886 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7887 VM_BESTFIT | VM_SLEEP); 7888 #else 7889 id = alloc_unr(dtrace_arena); 7890 #endif 7891 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7892 7893 probe->dtpr_id = id; 7894 probe->dtpr_gen = dtrace_probegen++; 7895 probe->dtpr_mod = dtrace_strdup(mod); 7896 probe->dtpr_func = dtrace_strdup(func); 7897 probe->dtpr_name = dtrace_strdup(name); 7898 probe->dtpr_arg = arg; 7899 probe->dtpr_aframes = aframes; 7900 probe->dtpr_provider = provider; 7901 7902 dtrace_hash_add(dtrace_bymod, probe); 7903 dtrace_hash_add(dtrace_byfunc, probe); 7904 dtrace_hash_add(dtrace_byname, probe); 7905 7906 if (id - 1 >= dtrace_nprobes) { 7907 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7908 size_t nsize = osize << 1; 7909 7910 if (nsize == 0) { 7911 ASSERT(osize == 0); 7912 ASSERT(dtrace_probes == NULL); 7913 nsize = sizeof (dtrace_probe_t *); 7914 } 7915 7916 probes = kmem_zalloc(nsize, KM_SLEEP); 7917 7918 if (dtrace_probes == NULL) { 7919 ASSERT(osize == 0); 7920 dtrace_probes = probes; 7921 dtrace_nprobes = 1; 7922 } else { 7923 dtrace_probe_t **oprobes = dtrace_probes; 7924 7925 bcopy(oprobes, probes, osize); 7926 dtrace_membar_producer(); 7927 dtrace_probes = probes; 7928 7929 dtrace_sync(); 7930 7931 /* 7932 * All CPUs are now seeing the new probes array; we can 7933 * safely free the old array. 7934 */ 7935 kmem_free(oprobes, osize); 7936 dtrace_nprobes <<= 1; 7937 } 7938 7939 ASSERT(id - 1 < dtrace_nprobes); 7940 } 7941 7942 ASSERT(dtrace_probes[id - 1] == NULL); 7943 dtrace_probes[id - 1] = probe; 7944 7945 if (provider != dtrace_provider) 7946 mutex_exit(&dtrace_lock); 7947 7948 return (id); 7949 } 7950 7951 static dtrace_probe_t * 7952 dtrace_probe_lookup_id(dtrace_id_t id) 7953 { 7954 ASSERT(MUTEX_HELD(&dtrace_lock)); 7955 7956 if (id == 0 || id > dtrace_nprobes) 7957 return (NULL); 7958 7959 return (dtrace_probes[id - 1]); 7960 } 7961 7962 static int 7963 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7964 { 7965 *((dtrace_id_t *)arg) = probe->dtpr_id; 7966 7967 return (DTRACE_MATCH_DONE); 7968 } 7969 7970 /* 7971 * Look up a probe based on provider and one or more of module name, function 7972 * name and probe name. 7973 */ 7974 dtrace_id_t 7975 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7976 char *func, char *name) 7977 { 7978 dtrace_probekey_t pkey; 7979 dtrace_id_t id; 7980 int match; 7981 7982 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7983 pkey.dtpk_pmatch = &dtrace_match_string; 7984 pkey.dtpk_mod = mod; 7985 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7986 pkey.dtpk_func = func; 7987 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7988 pkey.dtpk_name = name; 7989 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 7990 pkey.dtpk_id = DTRACE_IDNONE; 7991 7992 mutex_enter(&dtrace_lock); 7993 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7994 dtrace_probe_lookup_match, &id); 7995 mutex_exit(&dtrace_lock); 7996 7997 ASSERT(match == 1 || match == 0); 7998 return (match ? id : 0); 7999 } 8000 8001 /* 8002 * Returns the probe argument associated with the specified probe. 8003 */ 8004 void * 8005 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 8006 { 8007 dtrace_probe_t *probe; 8008 void *rval = NULL; 8009 8010 mutex_enter(&dtrace_lock); 8011 8012 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 8013 probe->dtpr_provider == (dtrace_provider_t *)id) 8014 rval = probe->dtpr_arg; 8015 8016 mutex_exit(&dtrace_lock); 8017 8018 return (rval); 8019 } 8020 8021 /* 8022 * Copy a probe into a probe description. 8023 */ 8024 static void 8025 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 8026 { 8027 bzero(pdp, sizeof (dtrace_probedesc_t)); 8028 pdp->dtpd_id = prp->dtpr_id; 8029 8030 (void) strncpy(pdp->dtpd_provider, 8031 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 8032 8033 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 8034 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 8035 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 8036 } 8037 8038 #if !defined(sun) 8039 static int 8040 dtrace_probe_provide_cb(linker_file_t lf, void *arg) 8041 { 8042 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 8043 8044 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 8045 8046 return (0); 8047 } 8048 #endif 8049 8050 8051 /* 8052 * Called to indicate that a probe -- or probes -- should be provided by a 8053 * specified provider. If the specified description is NULL, the provider will 8054 * be told to provide all of its probes. (This is done whenever a new 8055 * consumer comes along, or whenever a retained enabling is to be matched.) If 8056 * the specified description is non-NULL, the provider is given the 8057 * opportunity to dynamically provide the specified probe, allowing providers 8058 * to support the creation of probes on-the-fly. (So-called _autocreated_ 8059 * probes.) If the provider is NULL, the operations will be applied to all 8060 * providers; if the provider is non-NULL, the operations will only be applied 8061 * to the specified provider. The dtrace_provider_lock must be held, and the 8062 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 8063 * will need to grab the dtrace_lock when it reenters the framework through 8064 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 8065 */ 8066 static void 8067 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 8068 { 8069 #if defined(sun) 8070 modctl_t *ctl; 8071 #endif 8072 int all = 0; 8073 8074 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8075 8076 if (prv == NULL) { 8077 all = 1; 8078 prv = dtrace_provider; 8079 } 8080 8081 do { 8082 /* 8083 * First, call the blanket provide operation. 8084 */ 8085 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8086 8087 /* 8088 * Now call the per-module provide operation. We will grab 8089 * mod_lock to prevent the list from being modified. Note 8090 * that this also prevents the mod_busy bits from changing. 8091 * (mod_busy can only be changed with mod_lock held.)
8092 */ 8093 mutex_enter(&mod_lock); 8094 8095 #if defined(sun) 8096 ctl = &modules; 8097 do { 8098 if (ctl->mod_busy || ctl->mod_mp == NULL) 8099 continue; 8100 8101 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8102 8103 } while ((ctl = ctl->mod_next) != &modules); 8104 #else 8105 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 8106 #endif 8107 8108 mutex_exit(&mod_lock); 8109 } while (all && (prv = prv->dtpv_next) != NULL); 8110 } 8111 8112 #if defined(sun) 8113 /* 8114 * Iterate over each probe, and call the Framework-to-Provider API function 8115 * denoted by offs. 8116 */ 8117 static void 8118 dtrace_probe_foreach(uintptr_t offs) 8119 { 8120 dtrace_provider_t *prov; 8121 void (*func)(void *, dtrace_id_t, void *); 8122 dtrace_probe_t *probe; 8123 dtrace_icookie_t cookie; 8124 int i; 8125 8126 /* 8127 * We disable interrupts to walk through the probe array. This is 8128 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8129 * won't see stale data. 8130 */ 8131 cookie = dtrace_interrupt_disable(); 8132 8133 for (i = 0; i < dtrace_nprobes; i++) { 8134 if ((probe = dtrace_probes[i]) == NULL) 8135 continue; 8136 8137 if (probe->dtpr_ecb == NULL) { 8138 /* 8139 * This probe isn't enabled -- don't call the function. 8140 */ 8141 continue; 8142 } 8143 8144 prov = probe->dtpr_provider; 8145 func = *((void(**)(void *, dtrace_id_t, void *)) 8146 ((uintptr_t)&prov->dtpv_pops + offs)); 8147 8148 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8149 } 8150 8151 dtrace_interrupt_enable(cookie); 8152 } 8153 #endif 8154 8155 static int 8156 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8157 { 8158 dtrace_probekey_t pkey; 8159 uint32_t priv; 8160 uid_t uid; 8161 zoneid_t zoneid; 8162 8163 ASSERT(MUTEX_HELD(&dtrace_lock)); 8164 dtrace_ecb_create_cache = NULL; 8165 8166 if (desc == NULL) { 8167 /* 8168 * If we're passed a NULL description, we're being asked to 8169 * create an ECB with a NULL probe. 
8170 */ 8171 (void) dtrace_ecb_create_enable(NULL, enab); 8172 return (0); 8173 } 8174 8175 dtrace_probekey(desc, &pkey); 8176 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8177 &priv, &uid, &zoneid); 8178 8179 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8180 enab)); 8181 } 8182 8183 /* 8184 * DTrace Helper Provider Functions 8185 */ 8186 static void 8187 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8188 { 8189 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8190 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8191 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8192 } 8193 8194 static void 8195 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8196 const dof_provider_t *dofprov, char *strtab) 8197 { 8198 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8199 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8200 dofprov->dofpv_provattr); 8201 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8202 dofprov->dofpv_modattr); 8203 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8204 dofprov->dofpv_funcattr); 8205 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8206 dofprov->dofpv_nameattr); 8207 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8208 dofprov->dofpv_argsattr); 8209 } 8210 8211 static void 8212 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8213 { 8214 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8215 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8216 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8217 dof_provider_t *provider; 8218 dof_probe_t *probe; 8219 uint32_t *off, *enoff; 8220 uint8_t *arg; 8221 char *strtab; 8222 uint_t i, nprobes; 8223 dtrace_helper_provdesc_t dhpv; 8224 dtrace_helper_probedesc_t dhpb; 8225 dtrace_meta_t *meta = dtrace_meta_pid; 8226 dtrace_mops_t *mops = &meta->dtm_mops; 8227 void *parg; 8228 8229 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8230 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8231 provider->dofpv_strtab * dof->dofh_secsize); 8232 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8233 provider->dofpv_probes * dof->dofh_secsize); 8234 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8235 provider->dofpv_prargs * dof->dofh_secsize); 8236 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8237 provider->dofpv_proffs * dof->dofh_secsize); 8238 8239 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8240 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8241 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8242 enoff = NULL; 8243 8244 /* 8245 * See dtrace_helper_provider_validate(). 8246 */ 8247 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8248 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8249 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8250 provider->dofpv_prenoffs * dof->dofh_secsize); 8251 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8252 } 8253 8254 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8255 8256 /* 8257 * Create the provider. 8258 */ 8259 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8260 8261 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8262 return; 8263 8264 meta->dtm_count++; 8265 8266 /* 8267 * Create the probes. 
8268 */ 8269 for (i = 0; i < nprobes; i++) { 8270 probe = (dof_probe_t *)(uintptr_t)(daddr + 8271 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8272 8273 dhpb.dthpb_mod = dhp->dofhp_mod; 8274 dhpb.dthpb_func = strtab + probe->dofpr_func; 8275 dhpb.dthpb_name = strtab + probe->dofpr_name; 8276 dhpb.dthpb_base = probe->dofpr_addr; 8277 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8278 dhpb.dthpb_noffs = probe->dofpr_noffs; 8279 if (enoff != NULL) { 8280 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8281 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8282 } else { 8283 dhpb.dthpb_enoffs = NULL; 8284 dhpb.dthpb_nenoffs = 0; 8285 } 8286 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8287 dhpb.dthpb_nargc = probe->dofpr_nargc; 8288 dhpb.dthpb_xargc = probe->dofpr_xargc; 8289 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8290 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8291 8292 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8293 } 8294 } 8295 8296 static void 8297 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8298 { 8299 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8300 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8301 int i; 8302 8303 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8304 8305 for (i = 0; i < dof->dofh_secnum; i++) { 8306 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8307 dof->dofh_secoff + i * dof->dofh_secsize); 8308 8309 if (sec->dofs_type != DOF_SECT_PROVIDER) 8310 continue; 8311 8312 dtrace_helper_provide_one(dhp, sec, pid); 8313 } 8314 8315 /* 8316 * We may have just created probes, so we must now rematch against 8317 * any retained enablings. Note that this call will acquire both 8318 * cpu_lock and dtrace_lock; the fact that we are holding 8319 * dtrace_meta_lock now is what defines the ordering with respect to 8320 * these three locks. 8321 */ 8322 dtrace_enabling_matchall(); 8323 } 8324 8325 static void 8326 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8327 { 8328 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8329 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8330 dof_sec_t *str_sec; 8331 dof_provider_t *provider; 8332 char *strtab; 8333 dtrace_helper_provdesc_t dhpv; 8334 dtrace_meta_t *meta = dtrace_meta_pid; 8335 dtrace_mops_t *mops = &meta->dtm_mops; 8336 8337 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8338 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8339 provider->dofpv_strtab * dof->dofh_secsize); 8340 8341 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8342 8343 /* 8344 * Create the provider. 8345 */ 8346 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8347 8348 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8349 8350 meta->dtm_count--; 8351 } 8352 8353 static void 8354 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8355 { 8356 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8357 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8358 int i; 8359 8360 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8361 8362 for (i = 0; i < dof->dofh_secnum; i++) { 8363 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8364 dof->dofh_secoff + i * dof->dofh_secsize); 8365 8366 if (sec->dofs_type != DOF_SECT_PROVIDER) 8367 continue; 8368 8369 dtrace_helper_provider_remove_one(dhp, sec, pid); 8370 } 8371 } 8372 8373 /* 8374 * DTrace Meta Provider-to-Framework API Functions 8375 * 8376 * These functions implement the Meta Provider-to-Framework API, as described 8377 * in <sys/dtrace.h>. 
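 *
 * An illustrative registration sketch (the provider name and callback
 * identifiers below are hypothetical, not part of this file):
 *
 *	static dtrace_mops_t mymeta_mops = {
 *		.dtms_create_probe = mymeta_create_probe,
 *		.dtms_provide_pid = mymeta_provide_pid,
 *		.dtms_remove_pid = mymeta_remove_pid
 *	};
 *	dtrace_meta_provider_id_t mymeta_id;
 *	int error;
 *
 *	error = dtrace_meta_register("mymeta", &mymeta_mops, NULL, &mymeta_id);
 *	...
 *	error = dtrace_meta_unregister(mymeta_id);
 *
 * As the checks in dtrace_meta_register() below show, registration fails
 * with EINVAL for a NULL name, for an ops vector missing any of
 * dtms_create_probe, dtms_provide_pid or dtms_remove_pid, or when a
 * user-land meta-provider is already registered.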
8378 */ 8379 int 8380 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8381 dtrace_meta_provider_id_t *idp) 8382 { 8383 dtrace_meta_t *meta; 8384 dtrace_helpers_t *help, *next; 8385 int i; 8386 8387 *idp = DTRACE_METAPROVNONE; 8388 8389 /* 8390 * We strictly don't need the name, but we hold onto it for 8391 * debuggability. All hail error queues! 8392 */ 8393 if (name == NULL) { 8394 cmn_err(CE_WARN, "failed to register meta-provider: " 8395 "invalid name"); 8396 return (EINVAL); 8397 } 8398 8399 if (mops == NULL || 8400 mops->dtms_create_probe == NULL || 8401 mops->dtms_provide_pid == NULL || 8402 mops->dtms_remove_pid == NULL) { 8403 cmn_err(CE_WARN, "failed to register meta-register %s: " 8404 "invalid ops", name); 8405 return (EINVAL); 8406 } 8407 8408 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8409 meta->dtm_mops = *mops; 8410 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8411 (void) strcpy(meta->dtm_name, name); 8412 meta->dtm_arg = arg; 8413 8414 mutex_enter(&dtrace_meta_lock); 8415 mutex_enter(&dtrace_lock); 8416 8417 if (dtrace_meta_pid != NULL) { 8418 mutex_exit(&dtrace_lock); 8419 mutex_exit(&dtrace_meta_lock); 8420 cmn_err(CE_WARN, "failed to register meta-register %s: " 8421 "user-land meta-provider exists", name); 8422 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8423 kmem_free(meta, sizeof (dtrace_meta_t)); 8424 return (EINVAL); 8425 } 8426 8427 dtrace_meta_pid = meta; 8428 *idp = (dtrace_meta_provider_id_t)meta; 8429 8430 /* 8431 * If there are providers and probes ready to go, pass them 8432 * off to the new meta provider now. 8433 */ 8434 8435 help = dtrace_deferred_pid; 8436 dtrace_deferred_pid = NULL; 8437 8438 mutex_exit(&dtrace_lock); 8439 8440 while (help != NULL) { 8441 for (i = 0; i < help->dthps_nprovs; i++) { 8442 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8443 help->dthps_pid); 8444 } 8445 8446 next = help->dthps_next; 8447 help->dthps_next = NULL; 8448 help->dthps_prev = NULL; 8449 help->dthps_deferred = 0; 8450 help = next; 8451 } 8452 8453 mutex_exit(&dtrace_meta_lock); 8454 8455 return (0); 8456 } 8457 8458 int 8459 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8460 { 8461 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8462 8463 mutex_enter(&dtrace_meta_lock); 8464 mutex_enter(&dtrace_lock); 8465 8466 if (old == dtrace_meta_pid) { 8467 pp = &dtrace_meta_pid; 8468 } else { 8469 panic("attempt to unregister non-existent " 8470 "dtrace meta-provider %p\n", (void *)old); 8471 } 8472 8473 if (old->dtm_count != 0) { 8474 mutex_exit(&dtrace_lock); 8475 mutex_exit(&dtrace_meta_lock); 8476 return (EBUSY); 8477 } 8478 8479 *pp = NULL; 8480 8481 mutex_exit(&dtrace_lock); 8482 mutex_exit(&dtrace_meta_lock); 8483 8484 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8485 kmem_free(old, sizeof (dtrace_meta_t)); 8486 8487 return (0); 8488 } 8489 8490 8491 /* 8492 * DTrace DIF Object Functions 8493 */ 8494 static int 8495 dtrace_difo_err(uint_t pc, const char *format, ...) 8496 { 8497 if (dtrace_err_verbose) { 8498 va_list alist; 8499 8500 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8501 va_start(alist, format); 8502 (void) vuprintf(format, alist); 8503 va_end(alist); 8504 } 8505 8506 #ifdef DTRACE_ERRDEBUG 8507 dtrace_errdebug(format); 8508 #endif 8509 return (1); 8510 } 8511 8512 /* 8513 * Validate a DTrace DIF object by checking the IR instructions. The following 8514 * rules are currently enforced by dtrace_difo_validate(): 8515 * 8516 * 1. 
Each instruction must have a valid opcode 8517 * 2. Each register, string, variable, or subroutine reference must be valid 8518 * 3. No instruction can modify register %r0 (must be zero) 8519 * 4. All instruction reserved bits must be set to zero 8520 * 5. The last instruction must be a "ret" instruction 8521 * 6. All branch targets must reference a valid instruction _after_ the branch 8522 */ 8523 static int 8524 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8525 cred_t *cr) 8526 { 8527 int err = 0, i; 8528 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8529 int kcheckload; 8530 uint_t pc; 8531 8532 kcheckload = cr == NULL || 8533 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8534 8535 dp->dtdo_destructive = 0; 8536 8537 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8538 dif_instr_t instr = dp->dtdo_buf[pc]; 8539 8540 uint_t r1 = DIF_INSTR_R1(instr); 8541 uint_t r2 = DIF_INSTR_R2(instr); 8542 uint_t rd = DIF_INSTR_RD(instr); 8543 uint_t rs = DIF_INSTR_RS(instr); 8544 uint_t label = DIF_INSTR_LABEL(instr); 8545 uint_t v = DIF_INSTR_VAR(instr); 8546 uint_t subr = DIF_INSTR_SUBR(instr); 8547 uint_t type = DIF_INSTR_TYPE(instr); 8548 uint_t op = DIF_INSTR_OP(instr); 8549 8550 switch (op) { 8551 case DIF_OP_OR: 8552 case DIF_OP_XOR: 8553 case DIF_OP_AND: 8554 case DIF_OP_SLL: 8555 case DIF_OP_SRL: 8556 case DIF_OP_SRA: 8557 case DIF_OP_SUB: 8558 case DIF_OP_ADD: 8559 case DIF_OP_MUL: 8560 case DIF_OP_SDIV: 8561 case DIF_OP_UDIV: 8562 case DIF_OP_SREM: 8563 case DIF_OP_UREM: 8564 case DIF_OP_COPYS: 8565 if (r1 >= nregs) 8566 err += efunc(pc, "invalid register %u\n", r1); 8567 if (r2 >= nregs) 8568 err += efunc(pc, "invalid register %u\n", r2); 8569 if (rd >= nregs) 8570 err += efunc(pc, "invalid register %u\n", rd); 8571 if (rd == 0) 8572 err += efunc(pc, "cannot write to %r0\n"); 8573 break; 8574 case DIF_OP_NOT: 8575 case DIF_OP_MOV: 8576 case DIF_OP_ALLOCS: 8577 if (r1 >= nregs) 8578 err += efunc(pc, "invalid register %u\n", r1); 8579 if (r2 != 0) 8580 err += efunc(pc, "non-zero reserved bits\n"); 8581 if (rd >= nregs) 8582 err += efunc(pc, "invalid register %u\n", rd); 8583 if (rd == 0) 8584 err += efunc(pc, "cannot write to %r0\n"); 8585 break; 8586 case DIF_OP_LDSB: 8587 case DIF_OP_LDSH: 8588 case DIF_OP_LDSW: 8589 case DIF_OP_LDUB: 8590 case DIF_OP_LDUH: 8591 case DIF_OP_LDUW: 8592 case DIF_OP_LDX: 8593 if (r1 >= nregs) 8594 err += efunc(pc, "invalid register %u\n", r1); 8595 if (r2 != 0) 8596 err += efunc(pc, "non-zero reserved bits\n"); 8597 if (rd >= nregs) 8598 err += efunc(pc, "invalid register %u\n", rd); 8599 if (rd == 0) 8600 err += efunc(pc, "cannot write to %r0\n"); 8601 if (kcheckload) 8602 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8603 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8604 break; 8605 case DIF_OP_RLDSB: 8606 case DIF_OP_RLDSH: 8607 case DIF_OP_RLDSW: 8608 case DIF_OP_RLDUB: 8609 case DIF_OP_RLDUH: 8610 case DIF_OP_RLDUW: 8611 case DIF_OP_RLDX: 8612 if (r1 >= nregs) 8613 err += efunc(pc, "invalid register %u\n", r1); 8614 if (r2 != 0) 8615 err += efunc(pc, "non-zero reserved bits\n"); 8616 if (rd >= nregs) 8617 err += efunc(pc, "invalid register %u\n", rd); 8618 if (rd == 0) 8619 err += efunc(pc, "cannot write to %r0\n"); 8620 break; 8621 case DIF_OP_ULDSB: 8622 case DIF_OP_ULDSH: 8623 case DIF_OP_ULDSW: 8624 case DIF_OP_ULDUB: 8625 case DIF_OP_ULDUH: 8626 case DIF_OP_ULDUW: 8627 case DIF_OP_ULDX: 8628 if (r1 >= nregs) 8629 err += efunc(pc, "invalid register %u\n", r1); 8630 if (r2 != 0) 8631 err += 
efunc(pc, "non-zero reserved bits\n"); 8632 if (rd >= nregs) 8633 err += efunc(pc, "invalid register %u\n", rd); 8634 if (rd == 0) 8635 err += efunc(pc, "cannot write to %r0\n"); 8636 break; 8637 case DIF_OP_STB: 8638 case DIF_OP_STH: 8639 case DIF_OP_STW: 8640 case DIF_OP_STX: 8641 if (r1 >= nregs) 8642 err += efunc(pc, "invalid register %u\n", r1); 8643 if (r2 != 0) 8644 err += efunc(pc, "non-zero reserved bits\n"); 8645 if (rd >= nregs) 8646 err += efunc(pc, "invalid register %u\n", rd); 8647 if (rd == 0) 8648 err += efunc(pc, "cannot write to 0 address\n"); 8649 break; 8650 case DIF_OP_CMP: 8651 case DIF_OP_SCMP: 8652 if (r1 >= nregs) 8653 err += efunc(pc, "invalid register %u\n", r1); 8654 if (r2 >= nregs) 8655 err += efunc(pc, "invalid register %u\n", r2); 8656 if (rd != 0) 8657 err += efunc(pc, "non-zero reserved bits\n"); 8658 break; 8659 case DIF_OP_TST: 8660 if (r1 >= nregs) 8661 err += efunc(pc, "invalid register %u\n", r1); 8662 if (r2 != 0 || rd != 0) 8663 err += efunc(pc, "non-zero reserved bits\n"); 8664 break; 8665 case DIF_OP_BA: 8666 case DIF_OP_BE: 8667 case DIF_OP_BNE: 8668 case DIF_OP_BG: 8669 case DIF_OP_BGU: 8670 case DIF_OP_BGE: 8671 case DIF_OP_BGEU: 8672 case DIF_OP_BL: 8673 case DIF_OP_BLU: 8674 case DIF_OP_BLE: 8675 case DIF_OP_BLEU: 8676 if (label >= dp->dtdo_len) { 8677 err += efunc(pc, "invalid branch target %u\n", 8678 label); 8679 } 8680 if (label <= pc) { 8681 err += efunc(pc, "backward branch to %u\n", 8682 label); 8683 } 8684 break; 8685 case DIF_OP_RET: 8686 if (r1 != 0 || r2 != 0) 8687 err += efunc(pc, "non-zero reserved bits\n"); 8688 if (rd >= nregs) 8689 err += efunc(pc, "invalid register %u\n", rd); 8690 break; 8691 case DIF_OP_NOP: 8692 case DIF_OP_POPTS: 8693 case DIF_OP_FLUSHTS: 8694 if (r1 != 0 || r2 != 0 || rd != 0) 8695 err += efunc(pc, "non-zero reserved bits\n"); 8696 break; 8697 case DIF_OP_SETX: 8698 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8699 err += efunc(pc, "invalid integer ref %u\n", 8700 DIF_INSTR_INTEGER(instr)); 8701 } 8702 if (rd >= nregs) 8703 err += efunc(pc, "invalid register %u\n", rd); 8704 if (rd == 0) 8705 err += efunc(pc, "cannot write to %r0\n"); 8706 break; 8707 case DIF_OP_SETS: 8708 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8709 err += efunc(pc, "invalid string ref %u\n", 8710 DIF_INSTR_STRING(instr)); 8711 } 8712 if (rd >= nregs) 8713 err += efunc(pc, "invalid register %u\n", rd); 8714 if (rd == 0) 8715 err += efunc(pc, "cannot write to %r0\n"); 8716 break; 8717 case DIF_OP_LDGA: 8718 case DIF_OP_LDTA: 8719 if (r1 > DIF_VAR_ARRAY_MAX) 8720 err += efunc(pc, "invalid array %u\n", r1); 8721 if (r2 >= nregs) 8722 err += efunc(pc, "invalid register %u\n", r2); 8723 if (rd >= nregs) 8724 err += efunc(pc, "invalid register %u\n", rd); 8725 if (rd == 0) 8726 err += efunc(pc, "cannot write to %r0\n"); 8727 break; 8728 case DIF_OP_LDGS: 8729 case DIF_OP_LDTS: 8730 case DIF_OP_LDLS: 8731 case DIF_OP_LDGAA: 8732 case DIF_OP_LDTAA: 8733 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8734 err += efunc(pc, "invalid variable %u\n", v); 8735 if (rd >= nregs) 8736 err += efunc(pc, "invalid register %u\n", rd); 8737 if (rd == 0) 8738 err += efunc(pc, "cannot write to %r0\n"); 8739 break; 8740 case DIF_OP_STGS: 8741 case DIF_OP_STTS: 8742 case DIF_OP_STLS: 8743 case DIF_OP_STGAA: 8744 case DIF_OP_STTAA: 8745 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8746 err += efunc(pc, "invalid variable %u\n", v); 8747 if (rs >= nregs) 8748 err += efunc(pc, "invalid register %u\n", rd); 8749 break; 8750 case 
DIF_OP_CALL: 8751 if (subr > DIF_SUBR_MAX) 8752 err += efunc(pc, "invalid subr %u\n", subr); 8753 if (rd >= nregs) 8754 err += efunc(pc, "invalid register %u\n", rd); 8755 if (rd == 0) 8756 err += efunc(pc, "cannot write to %r0\n"); 8757 8758 if (subr == DIF_SUBR_COPYOUT || 8759 subr == DIF_SUBR_COPYOUTSTR) { 8760 dp->dtdo_destructive = 1; 8761 } 8762 break; 8763 case DIF_OP_PUSHTR: 8764 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8765 err += efunc(pc, "invalid ref type %u\n", type); 8766 if (r2 >= nregs) 8767 err += efunc(pc, "invalid register %u\n", r2); 8768 if (rs >= nregs) 8769 err += efunc(pc, "invalid register %u\n", rs); 8770 break; 8771 case DIF_OP_PUSHTV: 8772 if (type != DIF_TYPE_CTF) 8773 err += efunc(pc, "invalid val type %u\n", type); 8774 if (r2 >= nregs) 8775 err += efunc(pc, "invalid register %u\n", r2); 8776 if (rs >= nregs) 8777 err += efunc(pc, "invalid register %u\n", rs); 8778 break; 8779 default: 8780 err += efunc(pc, "invalid opcode %u\n", 8781 DIF_INSTR_OP(instr)); 8782 } 8783 } 8784 8785 if (dp->dtdo_len != 0 && 8786 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8787 err += efunc(dp->dtdo_len - 1, 8788 "expected 'ret' as last DIF instruction\n"); 8789 } 8790 8791 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8792 /* 8793 * If we're not returning by reference, the size must be either 8794 * 0 or the size of one of the base types. 8795 */ 8796 switch (dp->dtdo_rtype.dtdt_size) { 8797 case 0: 8798 case sizeof (uint8_t): 8799 case sizeof (uint16_t): 8800 case sizeof (uint32_t): 8801 case sizeof (uint64_t): 8802 break; 8803 8804 default: 8805 err += efunc(dp->dtdo_len - 1, "bad return size"); 8806 } 8807 } 8808 8809 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8810 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8811 dtrace_diftype_t *vt, *et; 8812 uint_t id, ndx; 8813 8814 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8815 v->dtdv_scope != DIFV_SCOPE_THREAD && 8816 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8817 err += efunc(i, "unrecognized variable scope %d\n", 8818 v->dtdv_scope); 8819 break; 8820 } 8821 8822 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8823 v->dtdv_kind != DIFV_KIND_SCALAR) { 8824 err += efunc(i, "unrecognized variable type %d\n", 8825 v->dtdv_kind); 8826 break; 8827 } 8828 8829 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8830 err += efunc(i, "%d exceeds variable id limit\n", id); 8831 break; 8832 } 8833 8834 if (id < DIF_VAR_OTHER_UBASE) 8835 continue; 8836 8837 /* 8838 * For user-defined variables, we need to check that this 8839 * definition is identical to any previous definition that we 8840 * encountered. 
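 *
 * For example, if an earlier DIFO in this vstate has already defined a
 * global variable with a given id as a scalar of some type, a later DIFO
 * that reuses the same id with a different kind, different type flags, or
 * a different (non-zero) type size will fail the comparisons below with a
 * "changed variable ..." error.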
8841 */ 8842 ndx = id - DIF_VAR_OTHER_UBASE; 8843 8844 switch (v->dtdv_scope) { 8845 case DIFV_SCOPE_GLOBAL: 8846 if (ndx < vstate->dtvs_nglobals) { 8847 dtrace_statvar_t *svar; 8848 8849 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8850 existing = &svar->dtsv_var; 8851 } 8852 8853 break; 8854 8855 case DIFV_SCOPE_THREAD: 8856 if (ndx < vstate->dtvs_ntlocals) 8857 existing = &vstate->dtvs_tlocals[ndx]; 8858 break; 8859 8860 case DIFV_SCOPE_LOCAL: 8861 if (ndx < vstate->dtvs_nlocals) { 8862 dtrace_statvar_t *svar; 8863 8864 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8865 existing = &svar->dtsv_var; 8866 } 8867 8868 break; 8869 } 8870 8871 vt = &v->dtdv_type; 8872 8873 if (vt->dtdt_flags & DIF_TF_BYREF) { 8874 if (vt->dtdt_size == 0) { 8875 err += efunc(i, "zero-sized variable\n"); 8876 break; 8877 } 8878 8879 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8880 vt->dtdt_size > dtrace_global_maxsize) { 8881 err += efunc(i, "oversized by-ref global\n"); 8882 break; 8883 } 8884 } 8885 8886 if (existing == NULL || existing->dtdv_id == 0) 8887 continue; 8888 8889 ASSERT(existing->dtdv_id == v->dtdv_id); 8890 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8891 8892 if (existing->dtdv_kind != v->dtdv_kind) 8893 err += efunc(i, "%d changed variable kind\n", id); 8894 8895 et = &existing->dtdv_type; 8896 8897 if (vt->dtdt_flags != et->dtdt_flags) { 8898 err += efunc(i, "%d changed variable type flags\n", id); 8899 break; 8900 } 8901 8902 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8903 err += efunc(i, "%d changed variable type size\n", id); 8904 break; 8905 } 8906 } 8907 8908 return (err); 8909 } 8910 8911 /* 8912 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8913 * are much more constrained than normal DIFOs. Specifically, they may 8914 * not: 8915 * 8916 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8917 * miscellaneous string routines 8918 * 2. Access DTrace variables other than the args[] array, and the 8919 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8920 * 3. Have thread-local variables. 8921 * 4. Have dynamic variables. 8922 */ 8923 static int 8924 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8925 { 8926 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8927 int err = 0; 8928 uint_t pc; 8929 8930 for (pc = 0; pc < dp->dtdo_len; pc++) { 8931 dif_instr_t instr = dp->dtdo_buf[pc]; 8932 8933 uint_t v = DIF_INSTR_VAR(instr); 8934 uint_t subr = DIF_INSTR_SUBR(instr); 8935 uint_t op = DIF_INSTR_OP(instr); 8936 8937 switch (op) { 8938 case DIF_OP_OR: 8939 case DIF_OP_XOR: 8940 case DIF_OP_AND: 8941 case DIF_OP_SLL: 8942 case DIF_OP_SRL: 8943 case DIF_OP_SRA: 8944 case DIF_OP_SUB: 8945 case DIF_OP_ADD: 8946 case DIF_OP_MUL: 8947 case DIF_OP_SDIV: 8948 case DIF_OP_UDIV: 8949 case DIF_OP_SREM: 8950 case DIF_OP_UREM: 8951 case DIF_OP_COPYS: 8952 case DIF_OP_NOT: 8953 case DIF_OP_MOV: 8954 case DIF_OP_RLDSB: 8955 case DIF_OP_RLDSH: 8956 case DIF_OP_RLDSW: 8957 case DIF_OP_RLDUB: 8958 case DIF_OP_RLDUH: 8959 case DIF_OP_RLDUW: 8960 case DIF_OP_RLDX: 8961 case DIF_OP_ULDSB: 8962 case DIF_OP_ULDSH: 8963 case DIF_OP_ULDSW: 8964 case DIF_OP_ULDUB: 8965 case DIF_OP_ULDUH: 8966 case DIF_OP_ULDUW: 8967 case DIF_OP_ULDX: 8968 case DIF_OP_STB: 8969 case DIF_OP_STH: 8970 case DIF_OP_STW: 8971 case DIF_OP_STX: 8972 case DIF_OP_ALLOCS: 8973 case DIF_OP_CMP: 8974 case DIF_OP_SCMP: 8975 case DIF_OP_TST: 8976 case DIF_OP_BA: 8977 case DIF_OP_BE: 8978 case DIF_OP_BNE: 8979 case DIF_OP_BG: 8980 case DIF_OP_BGU: 8981 case DIF_OP_BGE: 8982 case DIF_OP_BGEU: 8983 case DIF_OP_BL: 8984 case DIF_OP_BLU: 8985 case DIF_OP_BLE: 8986 case DIF_OP_BLEU: 8987 case DIF_OP_RET: 8988 case DIF_OP_NOP: 8989 case DIF_OP_POPTS: 8990 case DIF_OP_FLUSHTS: 8991 case DIF_OP_SETX: 8992 case DIF_OP_SETS: 8993 case DIF_OP_LDGA: 8994 case DIF_OP_LDLS: 8995 case DIF_OP_STGS: 8996 case DIF_OP_STLS: 8997 case DIF_OP_PUSHTR: 8998 case DIF_OP_PUSHTV: 8999 break; 9000 9001 case DIF_OP_LDGS: 9002 if (v >= DIF_VAR_OTHER_UBASE) 9003 break; 9004 9005 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 9006 break; 9007 9008 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 9009 v == DIF_VAR_PPID || v == DIF_VAR_TID || 9010 v == DIF_VAR_EXECARGS || 9011 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9012 v == DIF_VAR_UID || v == DIF_VAR_GID) 9013 break; 9014 9015 err += efunc(pc, "illegal variable %u\n", v); 9016 break; 9017 9018 case DIF_OP_LDTA: 9019 case DIF_OP_LDTS: 9020 case DIF_OP_LDGAA: 9021 case DIF_OP_LDTAA: 9022 err += efunc(pc, "illegal dynamic variable load\n"); 9023 break; 9024 9025 case DIF_OP_STTS: 9026 case DIF_OP_STGAA: 9027 case DIF_OP_STTAA: 9028 err += efunc(pc, "illegal dynamic variable store\n"); 9029 break; 9030 9031 case DIF_OP_CALL: 9032 if (subr == DIF_SUBR_ALLOCA || 9033 subr == DIF_SUBR_BCOPY || 9034 subr == DIF_SUBR_COPYIN || 9035 subr == DIF_SUBR_COPYINTO || 9036 subr == DIF_SUBR_COPYINSTR || 9037 subr == DIF_SUBR_INDEX || 9038 subr == DIF_SUBR_INET_NTOA || 9039 subr == DIF_SUBR_INET_NTOA6 || 9040 subr == DIF_SUBR_INET_NTOP || 9041 subr == DIF_SUBR_LLTOSTR || 9042 subr == DIF_SUBR_RINDEX || 9043 subr == DIF_SUBR_STRCHR || 9044 subr == DIF_SUBR_STRJOIN || 9045 subr == DIF_SUBR_STRRCHR || 9046 subr == DIF_SUBR_STRSTR || 9047 subr == DIF_SUBR_HTONS || 9048 subr == DIF_SUBR_HTONL || 9049 subr == DIF_SUBR_HTONLL || 9050 subr == DIF_SUBR_NTOHS || 9051 subr == DIF_SUBR_NTOHL || 9052 subr == DIF_SUBR_NTOHLL || 9053 subr == DIF_SUBR_MEMREF || 9054 subr == DIF_SUBR_TYPEREF) 9055 break; 9056 9057 err += efunc(pc, "invalid subr %u\n", subr); 9058 break; 9059 9060 default: 9061 err += efunc(pc, "invalid opcode %u\n", 9062 DIF_INSTR_OP(instr)); 9063 } 9064 } 9065 9066 return (err); 9067 } 9068 9069 /* 9070 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9071 * basis; 0 if not. 9072 */ 9073 static int 9074 dtrace_difo_cacheable(dtrace_difo_t *dp) 9075 { 9076 int i; 9077 9078 if (dp == NULL) 9079 return (0); 9080 9081 for (i = 0; i < dp->dtdo_varlen; i++) { 9082 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9083 9084 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9085 continue; 9086 9087 switch (v->dtdv_id) { 9088 case DIF_VAR_CURTHREAD: 9089 case DIF_VAR_PID: 9090 case DIF_VAR_TID: 9091 case DIF_VAR_EXECARGS: 9092 case DIF_VAR_EXECNAME: 9093 case DIF_VAR_ZONENAME: 9094 break; 9095 9096 default: 9097 return (0); 9098 } 9099 } 9100 9101 /* 9102 * This DIF object may be cacheable. Now we need to look for any 9103 * array loading instructions, any memory loading instructions, or 9104 * any stores to thread-local variables. 9105 */ 9106 for (i = 0; i < dp->dtdo_len; i++) { 9107 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9108 9109 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9110 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9111 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9112 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9113 return (0); 9114 } 9115 9116 return (1); 9117 } 9118 9119 static void 9120 dtrace_difo_hold(dtrace_difo_t *dp) 9121 { 9122 int i; 9123 9124 ASSERT(MUTEX_HELD(&dtrace_lock)); 9125 9126 dp->dtdo_refcnt++; 9127 ASSERT(dp->dtdo_refcnt != 0); 9128 9129 /* 9130 * We need to check this DIF object for references to the variable 9131 * DIF_VAR_VTIMESTAMP. 9132 */ 9133 for (i = 0; i < dp->dtdo_varlen; i++) { 9134 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9135 9136 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9137 continue; 9138 9139 if (dtrace_vtime_references++ == 0) 9140 dtrace_vtime_enable(); 9141 } 9142 } 9143 9144 /* 9145 * This routine calculates the dynamic variable chunksize for a given DIF 9146 * object. The calculation is not fool-proof, and can probably be tricked by 9147 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9148 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9149 * if a dynamic variable size exceeds the chunksize. 
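 *
 * Concretely, for each dynamic variable store the code below computes
 *
 *	size = sizeof (dtrace_dynvar_t)
 *	     + sizeof (dtrace_key_t) * (nkeys - 1)
 *	     + the sum of each key's P2ROUNDUP(dttk_size, sizeof (uint64_t))
 *	     + the dtdt_size of the stored variable,
 *
 * rounded up to a uint64_t boundary; a string-typed tuple key whose size
 * cannot be determined statically is charged dtrace_strsize_default.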
9150 */ 9151 static void 9152 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9153 { 9154 uint64_t sval = 0; 9155 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9156 const dif_instr_t *text = dp->dtdo_buf; 9157 uint_t pc, srd = 0; 9158 uint_t ttop = 0; 9159 size_t size, ksize; 9160 uint_t id, i; 9161 9162 for (pc = 0; pc < dp->dtdo_len; pc++) { 9163 dif_instr_t instr = text[pc]; 9164 uint_t op = DIF_INSTR_OP(instr); 9165 uint_t rd = DIF_INSTR_RD(instr); 9166 uint_t r1 = DIF_INSTR_R1(instr); 9167 uint_t nkeys = 0; 9168 uchar_t scope = 0; 9169 9170 dtrace_key_t *key = tupregs; 9171 9172 switch (op) { 9173 case DIF_OP_SETX: 9174 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9175 srd = rd; 9176 continue; 9177 9178 case DIF_OP_STTS: 9179 key = &tupregs[DIF_DTR_NREGS]; 9180 key[0].dttk_size = 0; 9181 key[1].dttk_size = 0; 9182 nkeys = 2; 9183 scope = DIFV_SCOPE_THREAD; 9184 break; 9185 9186 case DIF_OP_STGAA: 9187 case DIF_OP_STTAA: 9188 nkeys = ttop; 9189 9190 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9191 key[nkeys++].dttk_size = 0; 9192 9193 key[nkeys++].dttk_size = 0; 9194 9195 if (op == DIF_OP_STTAA) { 9196 scope = DIFV_SCOPE_THREAD; 9197 } else { 9198 scope = DIFV_SCOPE_GLOBAL; 9199 } 9200 9201 break; 9202 9203 case DIF_OP_PUSHTR: 9204 if (ttop == DIF_DTR_NREGS) 9205 return; 9206 9207 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9208 /* 9209 * If the register for the size of the "pushtr" 9210 * is %r0 (or the value is 0) and the type is 9211 * a string, we'll use the system-wide default 9212 * string size. 9213 */ 9214 tupregs[ttop++].dttk_size = 9215 dtrace_strsize_default; 9216 } else { 9217 if (srd == 0) 9218 return; 9219 9220 tupregs[ttop++].dttk_size = sval; 9221 } 9222 9223 break; 9224 9225 case DIF_OP_PUSHTV: 9226 if (ttop == DIF_DTR_NREGS) 9227 return; 9228 9229 tupregs[ttop++].dttk_size = 0; 9230 break; 9231 9232 case DIF_OP_FLUSHTS: 9233 ttop = 0; 9234 break; 9235 9236 case DIF_OP_POPTS: 9237 if (ttop != 0) 9238 ttop--; 9239 break; 9240 } 9241 9242 sval = 0; 9243 srd = 0; 9244 9245 if (nkeys == 0) 9246 continue; 9247 9248 /* 9249 * We have a dynamic variable allocation; calculate its size. 9250 */ 9251 for (ksize = 0, i = 0; i < nkeys; i++) 9252 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9253 9254 size = sizeof (dtrace_dynvar_t); 9255 size += sizeof (dtrace_key_t) * (nkeys - 1); 9256 size += ksize; 9257 9258 /* 9259 * Now we need to determine the size of the stored data. 9260 */ 9261 id = DIF_INSTR_VAR(instr); 9262 9263 for (i = 0; i < dp->dtdo_varlen; i++) { 9264 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9265 9266 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9267 size += v->dtdv_type.dtdt_size; 9268 break; 9269 } 9270 } 9271 9272 if (i == dp->dtdo_varlen) 9273 return; 9274 9275 /* 9276 * We have the size. If this is larger than the chunk size 9277 * for our dynamic variable state, reset the chunk size. 
9278 */ 9279 size = P2ROUNDUP(size, sizeof (uint64_t)); 9280 9281 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9282 vstate->dtvs_dynvars.dtds_chunksize = size; 9283 } 9284 } 9285 9286 static void 9287 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9288 { 9289 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9290 uint_t id; 9291 9292 ASSERT(MUTEX_HELD(&dtrace_lock)); 9293 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9294 9295 for (i = 0; i < dp->dtdo_varlen; i++) { 9296 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9297 dtrace_statvar_t *svar, ***svarp = NULL; 9298 size_t dsize = 0; 9299 uint8_t scope = v->dtdv_scope; 9300 int *np = NULL; 9301 9302 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9303 continue; 9304 9305 id -= DIF_VAR_OTHER_UBASE; 9306 9307 switch (scope) { 9308 case DIFV_SCOPE_THREAD: 9309 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9310 dtrace_difv_t *tlocals; 9311 9312 if ((ntlocals = (otlocals << 1)) == 0) 9313 ntlocals = 1; 9314 9315 osz = otlocals * sizeof (dtrace_difv_t); 9316 nsz = ntlocals * sizeof (dtrace_difv_t); 9317 9318 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9319 9320 if (osz != 0) { 9321 bcopy(vstate->dtvs_tlocals, 9322 tlocals, osz); 9323 kmem_free(vstate->dtvs_tlocals, osz); 9324 } 9325 9326 vstate->dtvs_tlocals = tlocals; 9327 vstate->dtvs_ntlocals = ntlocals; 9328 } 9329 9330 vstate->dtvs_tlocals[id] = *v; 9331 continue; 9332 9333 case DIFV_SCOPE_LOCAL: 9334 np = &vstate->dtvs_nlocals; 9335 svarp = &vstate->dtvs_locals; 9336 9337 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9338 dsize = NCPU * (v->dtdv_type.dtdt_size + 9339 sizeof (uint64_t)); 9340 else 9341 dsize = NCPU * sizeof (uint64_t); 9342 9343 break; 9344 9345 case DIFV_SCOPE_GLOBAL: 9346 np = &vstate->dtvs_nglobals; 9347 svarp = &vstate->dtvs_globals; 9348 9349 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9350 dsize = v->dtdv_type.dtdt_size + 9351 sizeof (uint64_t); 9352 9353 break; 9354 9355 default: 9356 ASSERT(0); 9357 } 9358 9359 while (id >= (oldsvars = *np)) { 9360 dtrace_statvar_t **statics; 9361 int newsvars, oldsize, newsize; 9362 9363 if ((newsvars = (oldsvars << 1)) == 0) 9364 newsvars = 1; 9365 9366 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9367 newsize = newsvars * sizeof (dtrace_statvar_t *); 9368 9369 statics = kmem_zalloc(newsize, KM_SLEEP); 9370 9371 if (oldsize != 0) { 9372 bcopy(*svarp, statics, oldsize); 9373 kmem_free(*svarp, oldsize); 9374 } 9375 9376 *svarp = statics; 9377 *np = newsvars; 9378 } 9379 9380 if ((svar = (*svarp)[id]) == NULL) { 9381 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9382 svar->dtsv_var = *v; 9383 9384 if ((svar->dtsv_size = dsize) != 0) { 9385 svar->dtsv_data = (uint64_t)(uintptr_t) 9386 kmem_zalloc(dsize, KM_SLEEP); 9387 } 9388 9389 (*svarp)[id] = svar; 9390 } 9391 9392 svar->dtsv_refcnt++; 9393 } 9394 9395 dtrace_difo_chunksize(dp, vstate); 9396 dtrace_difo_hold(dp); 9397 } 9398 9399 static dtrace_difo_t * 9400 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9401 { 9402 dtrace_difo_t *new; 9403 size_t sz; 9404 9405 ASSERT(dp->dtdo_buf != NULL); 9406 ASSERT(dp->dtdo_refcnt != 0); 9407 9408 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9409 9410 ASSERT(dp->dtdo_buf != NULL); 9411 sz = dp->dtdo_len * sizeof (dif_instr_t); 9412 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9413 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9414 new->dtdo_len = dp->dtdo_len; 9415 9416 if (dp->dtdo_strtab != NULL) { 9417 ASSERT(dp->dtdo_strlen != 0); 9418 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9419 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9420 new->dtdo_strlen = dp->dtdo_strlen; 9421 } 9422 9423 if (dp->dtdo_inttab != NULL) { 9424 ASSERT(dp->dtdo_intlen != 0); 9425 sz = dp->dtdo_intlen * sizeof (uint64_t); 9426 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9427 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9428 new->dtdo_intlen = dp->dtdo_intlen; 9429 } 9430 9431 if (dp->dtdo_vartab != NULL) { 9432 ASSERT(dp->dtdo_varlen != 0); 9433 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9434 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9435 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9436 new->dtdo_varlen = dp->dtdo_varlen; 9437 } 9438 9439 dtrace_difo_init(new, vstate); 9440 return (new); 9441 } 9442 9443 static void 9444 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9445 { 9446 int i; 9447 9448 ASSERT(dp->dtdo_refcnt == 0); 9449 9450 for (i = 0; i < dp->dtdo_varlen; i++) { 9451 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9452 dtrace_statvar_t *svar, **svarp = NULL; 9453 uint_t id; 9454 uint8_t scope = v->dtdv_scope; 9455 int *np = NULL; 9456 9457 switch (scope) { 9458 case DIFV_SCOPE_THREAD: 9459 continue; 9460 9461 case DIFV_SCOPE_LOCAL: 9462 np = &vstate->dtvs_nlocals; 9463 svarp = vstate->dtvs_locals; 9464 break; 9465 9466 case DIFV_SCOPE_GLOBAL: 9467 np = &vstate->dtvs_nglobals; 9468 svarp = vstate->dtvs_globals; 9469 break; 9470 9471 default: 9472 ASSERT(0); 9473 } 9474 9475 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9476 continue; 9477 9478 id -= DIF_VAR_OTHER_UBASE; 9479 ASSERT(id < *np); 9480 9481 svar = svarp[id]; 9482 ASSERT(svar != NULL); 9483 ASSERT(svar->dtsv_refcnt > 0); 9484 9485 if (--svar->dtsv_refcnt > 0) 9486 continue; 9487 9488 if (svar->dtsv_size != 0) { 9489 ASSERT(svar->dtsv_data != 0); 9490 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9491 svar->dtsv_size); 9492 } 9493 9494 kmem_free(svar, sizeof (dtrace_statvar_t)); 9495 svarp[id] = NULL; 9496 } 9497 9498 if (dp->dtdo_buf != NULL) 9499 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9500 if (dp->dtdo_inttab != NULL) 9501 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9502 if (dp->dtdo_strtab != NULL) 9503 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9504 if (dp->dtdo_vartab != NULL) 9505 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9506 9507 kmem_free(dp, sizeof (dtrace_difo_t)); 9508 } 9509 9510 static void 9511 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9512 { 9513 int i; 9514 9515 ASSERT(MUTEX_HELD(&dtrace_lock)); 9516 ASSERT(dp->dtdo_refcnt != 0); 9517 9518 for (i = 0; i < dp->dtdo_varlen; i++) { 9519 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9520 9521 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9522 continue; 9523 9524 ASSERT(dtrace_vtime_references > 0); 9525 if (--dtrace_vtime_references == 0) 9526 dtrace_vtime_disable(); 9527 } 9528 9529 if (--dp->dtdo_refcnt == 0) 9530 dtrace_difo_destroy(dp, vstate); 9531 } 9532 9533 /* 9534 * DTrace Format Functions 9535 */ 9536 static uint16_t 9537 dtrace_format_add(dtrace_state_t *state, char *str) 9538 { 9539 char *fmt, **new; 9540 uint16_t ndx, len = strlen(str) + 1; 9541 9542 fmt = kmem_zalloc(len, KM_SLEEP); 9543 bcopy(str, fmt, len); 9544 9545 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9546 if (state->dts_formats[ndx] == NULL) { 9547 state->dts_formats[ndx] = fmt; 9548 return (ndx + 1); 9549 } 9550 } 9551 9552 if (state->dts_nformats == USHRT_MAX) { 9553 /* 9554 * This is only likely if a denial-of-service attack is being 9555 * attempted. 
As such, it's okay to fail silently here. 9556 */ 9557 kmem_free(fmt, len); 9558 return (0); 9559 } 9560 9561 /* 9562 * For simplicity, we always resize the formats array to be exactly the 9563 * number of formats. 9564 */ 9565 ndx = state->dts_nformats++; 9566 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9567 9568 if (state->dts_formats != NULL) { 9569 ASSERT(ndx != 0); 9570 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9571 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9572 } 9573 9574 state->dts_formats = new; 9575 state->dts_formats[ndx] = fmt; 9576 9577 return (ndx + 1); 9578 } 9579 9580 static void 9581 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9582 { 9583 char *fmt; 9584 9585 ASSERT(state->dts_formats != NULL); 9586 ASSERT(format <= state->dts_nformats); 9587 ASSERT(state->dts_formats[format - 1] != NULL); 9588 9589 fmt = state->dts_formats[format - 1]; 9590 kmem_free(fmt, strlen(fmt) + 1); 9591 state->dts_formats[format - 1] = NULL; 9592 } 9593 9594 static void 9595 dtrace_format_destroy(dtrace_state_t *state) 9596 { 9597 int i; 9598 9599 if (state->dts_nformats == 0) { 9600 ASSERT(state->dts_formats == NULL); 9601 return; 9602 } 9603 9604 ASSERT(state->dts_formats != NULL); 9605 9606 for (i = 0; i < state->dts_nformats; i++) { 9607 char *fmt = state->dts_formats[i]; 9608 9609 if (fmt == NULL) 9610 continue; 9611 9612 kmem_free(fmt, strlen(fmt) + 1); 9613 } 9614 9615 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9616 state->dts_nformats = 0; 9617 state->dts_formats = NULL; 9618 } 9619 9620 /* 9621 * DTrace Predicate Functions 9622 */ 9623 static dtrace_predicate_t * 9624 dtrace_predicate_create(dtrace_difo_t *dp) 9625 { 9626 dtrace_predicate_t *pred; 9627 9628 ASSERT(MUTEX_HELD(&dtrace_lock)); 9629 ASSERT(dp->dtdo_refcnt != 0); 9630 9631 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9632 pred->dtp_difo = dp; 9633 pred->dtp_refcnt = 1; 9634 9635 if (!dtrace_difo_cacheable(dp)) 9636 return (pred); 9637 9638 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9639 /* 9640 * This is only theoretically possible -- we have had 2^32 9641 * cacheable predicates on this machine. We cannot allow any 9642 * more predicates to become cacheable: as unlikely as it is, 9643 * there may be a thread caching a (now stale) predicate cache 9644 * ID. 
(N.B.: the temptation is being successfully resisted to 9645 * have this cmn_err() "Holy shit -- we executed this code!") 9646 */ 9647 return (pred); 9648 } 9649 9650 pred->dtp_cacheid = dtrace_predcache_id++; 9651 9652 return (pred); 9653 } 9654 9655 static void 9656 dtrace_predicate_hold(dtrace_predicate_t *pred) 9657 { 9658 ASSERT(MUTEX_HELD(&dtrace_lock)); 9659 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9660 ASSERT(pred->dtp_refcnt > 0); 9661 9662 pred->dtp_refcnt++; 9663 } 9664 9665 static void 9666 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9667 { 9668 dtrace_difo_t *dp = pred->dtp_difo; 9669 9670 ASSERT(MUTEX_HELD(&dtrace_lock)); 9671 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9672 ASSERT(pred->dtp_refcnt > 0); 9673 9674 if (--pred->dtp_refcnt == 0) { 9675 dtrace_difo_release(pred->dtp_difo, vstate); 9676 kmem_free(pred, sizeof (dtrace_predicate_t)); 9677 } 9678 } 9679 9680 /* 9681 * DTrace Action Description Functions 9682 */ 9683 static dtrace_actdesc_t * 9684 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9685 uint64_t uarg, uint64_t arg) 9686 { 9687 dtrace_actdesc_t *act; 9688 9689 #if defined(sun) 9690 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9691 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9692 #endif 9693 9694 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9695 act->dtad_kind = kind; 9696 act->dtad_ntuple = ntuple; 9697 act->dtad_uarg = uarg; 9698 act->dtad_arg = arg; 9699 act->dtad_refcnt = 1; 9700 9701 return (act); 9702 } 9703 9704 static void 9705 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9706 { 9707 ASSERT(act->dtad_refcnt >= 1); 9708 act->dtad_refcnt++; 9709 } 9710 9711 static void 9712 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9713 { 9714 dtrace_actkind_t kind = act->dtad_kind; 9715 dtrace_difo_t *dp; 9716 9717 ASSERT(act->dtad_refcnt >= 1); 9718 9719 if (--act->dtad_refcnt != 0) 9720 return; 9721 9722 if ((dp = act->dtad_difo) != NULL) 9723 dtrace_difo_release(dp, vstate); 9724 9725 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9726 char *str = (char *)(uintptr_t)act->dtad_arg; 9727 9728 #if defined(sun) 9729 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9730 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9731 #endif 9732 9733 if (str != NULL) 9734 kmem_free(str, strlen(str) + 1); 9735 } 9736 9737 kmem_free(act, sizeof (dtrace_actdesc_t)); 9738 } 9739 9740 /* 9741 * DTrace ECB Functions 9742 */ 9743 static dtrace_ecb_t * 9744 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9745 { 9746 dtrace_ecb_t *ecb; 9747 dtrace_epid_t epid; 9748 9749 ASSERT(MUTEX_HELD(&dtrace_lock)); 9750 9751 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9752 ecb->dte_predicate = NULL; 9753 ecb->dte_probe = probe; 9754 9755 /* 9756 * The default size is the size of the default action: recording 9757 * the epid. 
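 * (Until actions are added via dtrace_ecb_action_add() and the layout is
 * recomputed by dtrace_ecb_resize(), a new ECB therefore records only a
 * dtrace_epid_t -- 4 bytes with the usual uint32_t definition.)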
9758 */ 9759 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9760 ecb->dte_alignment = sizeof (dtrace_epid_t); 9761 9762 epid = state->dts_epid++; 9763 9764 if (epid - 1 >= state->dts_necbs) { 9765 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9766 int necbs = state->dts_necbs << 1; 9767 9768 ASSERT(epid == state->dts_necbs + 1); 9769 9770 if (necbs == 0) { 9771 ASSERT(oecbs == NULL); 9772 necbs = 1; 9773 } 9774 9775 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9776 9777 if (oecbs != NULL) 9778 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9779 9780 dtrace_membar_producer(); 9781 state->dts_ecbs = ecbs; 9782 9783 if (oecbs != NULL) { 9784 /* 9785 * If this state is active, we must dtrace_sync() 9786 * before we can free the old dts_ecbs array: we're 9787 * coming in hot, and there may be active ring 9788 * buffer processing (which indexes into the dts_ecbs 9789 * array) on another CPU. 9790 */ 9791 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9792 dtrace_sync(); 9793 9794 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9795 } 9796 9797 dtrace_membar_producer(); 9798 state->dts_necbs = necbs; 9799 } 9800 9801 ecb->dte_state = state; 9802 9803 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9804 dtrace_membar_producer(); 9805 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9806 9807 return (ecb); 9808 } 9809 9810 static void 9811 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9812 { 9813 dtrace_probe_t *probe = ecb->dte_probe; 9814 9815 ASSERT(MUTEX_HELD(&cpu_lock)); 9816 ASSERT(MUTEX_HELD(&dtrace_lock)); 9817 ASSERT(ecb->dte_next == NULL); 9818 9819 if (probe == NULL) { 9820 /* 9821 * This is the NULL probe -- there's nothing to do. 9822 */ 9823 return; 9824 } 9825 9826 if (probe->dtpr_ecb == NULL) { 9827 dtrace_provider_t *prov = probe->dtpr_provider; 9828 9829 /* 9830 * We're the first ECB on this probe. 9831 */ 9832 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9833 9834 if (ecb->dte_predicate != NULL) 9835 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9836 9837 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9838 probe->dtpr_id, probe->dtpr_arg); 9839 } else { 9840 /* 9841 * This probe is already active. Swing the last pointer to 9842 * point to the new ECB, and issue a dtrace_sync() to assure 9843 * that all CPUs have seen the change. 9844 */ 9845 ASSERT(probe->dtpr_ecb_last != NULL); 9846 probe->dtpr_ecb_last->dte_next = ecb; 9847 probe->dtpr_ecb_last = ecb; 9848 probe->dtpr_predcache = 0; 9849 9850 dtrace_sync(); 9851 } 9852 } 9853 9854 static void 9855 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9856 { 9857 uint32_t maxalign = sizeof (dtrace_epid_t); 9858 uint32_t align = sizeof (uint8_t), offs, diff; 9859 dtrace_action_t *act; 9860 int wastuple = 0; 9861 uint32_t aggbase = UINT32_MAX; 9862 dtrace_state_t *state = ecb->dte_state; 9863 9864 /* 9865 * If we record anything, we always record the epid. (And we always 9866 * record it first.) 9867 */ 9868 offs = sizeof (dtrace_epid_t); 9869 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9870 9871 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9872 dtrace_recdesc_t *rec = &act->dta_rec; 9873 9874 if ((align = rec->dtrd_alignment) > maxalign) 9875 maxalign = align; 9876 9877 if (!wastuple && act->dta_intuple) { 9878 /* 9879 * This is the first record in a tuple. Align the 9880 * offset to be at offset 4 in an 8-byte aligned 9881 * block. 
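 *
 * For example, assuming a 4-byte dtrace_aggid_t: with offs == 4, diff is
 * 8, (diff & 7) is 0, no padding is needed and aggbase becomes 0; with
 * offs == 8, diff is 12, (diff & 7) is 4, so offs is advanced to 12 and
 * aggbase becomes 8.  Either way the aggregation ID slot begins on an
 * 8-byte boundary, with the first tuple record at offset 4 within it.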
9882 */ 9883 diff = offs + sizeof (dtrace_aggid_t); 9884 9885 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9886 offs += sizeof (uint64_t) - diff; 9887 9888 aggbase = offs - sizeof (dtrace_aggid_t); 9889 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9890 } 9891 9892 /*LINTED*/ 9893 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9894 /* 9895 * The current offset is not properly aligned; align it. 9896 */ 9897 offs += align - diff; 9898 } 9899 9900 rec->dtrd_offset = offs; 9901 9902 if (offs + rec->dtrd_size > ecb->dte_needed) { 9903 ecb->dte_needed = offs + rec->dtrd_size; 9904 9905 if (ecb->dte_needed > state->dts_needed) 9906 state->dts_needed = ecb->dte_needed; 9907 } 9908 9909 if (DTRACEACT_ISAGG(act->dta_kind)) { 9910 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9911 dtrace_action_t *first = agg->dtag_first, *prev; 9912 9913 ASSERT(rec->dtrd_size != 0 && first != NULL); 9914 ASSERT(wastuple); 9915 ASSERT(aggbase != UINT32_MAX); 9916 9917 agg->dtag_base = aggbase; 9918 9919 while ((prev = first->dta_prev) != NULL && 9920 DTRACEACT_ISAGG(prev->dta_kind)) { 9921 agg = (dtrace_aggregation_t *)prev; 9922 first = agg->dtag_first; 9923 } 9924 9925 if (prev != NULL) { 9926 offs = prev->dta_rec.dtrd_offset + 9927 prev->dta_rec.dtrd_size; 9928 } else { 9929 offs = sizeof (dtrace_epid_t); 9930 } 9931 wastuple = 0; 9932 } else { 9933 if (!act->dta_intuple) 9934 ecb->dte_size = offs + rec->dtrd_size; 9935 9936 offs += rec->dtrd_size; 9937 } 9938 9939 wastuple = act->dta_intuple; 9940 } 9941 9942 if ((act = ecb->dte_action) != NULL && 9943 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9944 ecb->dte_size == sizeof (dtrace_epid_t)) { 9945 /* 9946 * If the size is still sizeof (dtrace_epid_t), then all 9947 * actions store no data; set the size to 0. 9948 */ 9949 ecb->dte_alignment = maxalign; 9950 ecb->dte_size = 0; 9951 9952 /* 9953 * If the needed space is still sizeof (dtrace_epid_t), then 9954 * all actions need no additional space; set the needed 9955 * size to 0. 9956 */ 9957 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9958 ecb->dte_needed = 0; 9959 9960 return; 9961 } 9962 9963 /* 9964 * Set our alignment, and make sure that the dte_size and dte_needed 9965 * are aligned to the size of an EPID. 
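 *
 * (This is the usual (x + (align - 1)) & ~(align - 1) rounding; e.g.,
 * assuming a 4-byte dtrace_epid_t, a dte_size of 13 becomes 16 while a
 * size that is already a multiple of 4 is left unchanged.)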
9966 */ 9967 ecb->dte_alignment = maxalign; 9968 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9969 ~(sizeof (dtrace_epid_t) - 1); 9970 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9971 ~(sizeof (dtrace_epid_t) - 1); 9972 ASSERT(ecb->dte_size <= ecb->dte_needed); 9973 } 9974 9975 static dtrace_action_t * 9976 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9977 { 9978 dtrace_aggregation_t *agg; 9979 size_t size = sizeof (uint64_t); 9980 int ntuple = desc->dtad_ntuple; 9981 dtrace_action_t *act; 9982 dtrace_recdesc_t *frec; 9983 dtrace_aggid_t aggid; 9984 dtrace_state_t *state = ecb->dte_state; 9985 9986 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9987 agg->dtag_ecb = ecb; 9988 9989 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9990 9991 switch (desc->dtad_kind) { 9992 case DTRACEAGG_MIN: 9993 agg->dtag_initial = INT64_MAX; 9994 agg->dtag_aggregate = dtrace_aggregate_min; 9995 break; 9996 9997 case DTRACEAGG_MAX: 9998 agg->dtag_initial = INT64_MIN; 9999 agg->dtag_aggregate = dtrace_aggregate_max; 10000 break; 10001 10002 case DTRACEAGG_COUNT: 10003 agg->dtag_aggregate = dtrace_aggregate_count; 10004 break; 10005 10006 case DTRACEAGG_QUANTIZE: 10007 agg->dtag_aggregate = dtrace_aggregate_quantize; 10008 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 10009 sizeof (uint64_t); 10010 break; 10011 10012 case DTRACEAGG_LQUANTIZE: { 10013 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10014 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10015 10016 agg->dtag_initial = desc->dtad_arg; 10017 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10018 10019 if (step == 0 || levels == 0) 10020 goto err; 10021 10022 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10023 break; 10024 } 10025 10026 case DTRACEAGG_LLQUANTIZE: { 10027 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10028 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10029 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10030 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10031 int64_t v; 10032 10033 agg->dtag_initial = desc->dtad_arg; 10034 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10035 10036 if (factor < 2 || low >= high || nsteps < factor) 10037 goto err; 10038 10039 /* 10040 * Now check that the number of steps evenly divides a power 10041 * of the factor. (This assures both integer bucket size and 10042 * linearity within each magnitude.) 10043 */ 10044 for (v = factor; v < nsteps; v *= factor) 10045 continue; 10046 10047 if ((v % nsteps) || (nsteps % factor)) 10048 goto err; 10049 10050 size = (dtrace_aggregate_llquantize_bucket(factor, 10051 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10052 break; 10053 } 10054 10055 case DTRACEAGG_AVG: 10056 agg->dtag_aggregate = dtrace_aggregate_avg; 10057 size = sizeof (uint64_t) * 2; 10058 break; 10059 10060 case DTRACEAGG_STDDEV: 10061 agg->dtag_aggregate = dtrace_aggregate_stddev; 10062 size = sizeof (uint64_t) * 4; 10063 break; 10064 10065 case DTRACEAGG_SUM: 10066 agg->dtag_aggregate = dtrace_aggregate_sum; 10067 break; 10068 10069 default: 10070 goto err; 10071 } 10072 10073 agg->dtag_action.dta_rec.dtrd_size = size; 10074 10075 if (ntuple == 0) 10076 goto err; 10077 10078 /* 10079 * We must make sure that we have enough actions for the n-tuple. 
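 *
 * For example, an aggregation keyed on a two-element tuple arrives here
 * with dtad_ntuple == 2; the loop below walks backward from
 * dte_action_last and must find two non-aggregating actions -- the tuple
 * key records -- before reaching another aggregation, at which point
 * dtag_first is set to the action where the tuple begins.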
10080 */ 10081 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10082 if (DTRACEACT_ISAGG(act->dta_kind)) 10083 break; 10084 10085 if (--ntuple == 0) { 10086 /* 10087 * This is the action with which our n-tuple begins. 10088 */ 10089 agg->dtag_first = act; 10090 goto success; 10091 } 10092 } 10093 10094 /* 10095 * This n-tuple is short by ntuple elements. Return failure. 10096 */ 10097 ASSERT(ntuple != 0); 10098 err: 10099 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10100 return (NULL); 10101 10102 success: 10103 /* 10104 * If the last action in the tuple has a size of zero, it's actually 10105 * an expression argument for the aggregating action. 10106 */ 10107 ASSERT(ecb->dte_action_last != NULL); 10108 act = ecb->dte_action_last; 10109 10110 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10111 ASSERT(act->dta_difo != NULL); 10112 10113 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10114 agg->dtag_hasarg = 1; 10115 } 10116 10117 /* 10118 * We need to allocate an id for this aggregation. 10119 */ 10120 #if defined(sun) 10121 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10122 VM_BESTFIT | VM_SLEEP); 10123 #else 10124 aggid = alloc_unr(state->dts_aggid_arena); 10125 #endif 10126 10127 if (aggid - 1 >= state->dts_naggregations) { 10128 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10129 dtrace_aggregation_t **aggs; 10130 int naggs = state->dts_naggregations << 1; 10131 int onaggs = state->dts_naggregations; 10132 10133 ASSERT(aggid == state->dts_naggregations + 1); 10134 10135 if (naggs == 0) { 10136 ASSERT(oaggs == NULL); 10137 naggs = 1; 10138 } 10139 10140 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10141 10142 if (oaggs != NULL) { 10143 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10144 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10145 } 10146 10147 state->dts_aggregations = aggs; 10148 state->dts_naggregations = naggs; 10149 } 10150 10151 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10152 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10153 10154 frec = &agg->dtag_first->dta_rec; 10155 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10156 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10157 10158 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10159 ASSERT(!act->dta_intuple); 10160 act->dta_intuple = 1; 10161 } 10162 10163 return (&agg->dtag_action); 10164 } 10165 10166 static void 10167 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10168 { 10169 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10170 dtrace_state_t *state = ecb->dte_state; 10171 dtrace_aggid_t aggid = agg->dtag_id; 10172 10173 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10174 #if defined(sun) 10175 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10176 #else 10177 free_unr(state->dts_aggid_arena, aggid); 10178 #endif 10179 10180 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10181 state->dts_aggregations[aggid - 1] = NULL; 10182 10183 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10184 } 10185 10186 static int 10187 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10188 { 10189 dtrace_action_t *action, *last; 10190 dtrace_difo_t *dp = desc->dtad_difo; 10191 uint32_t size = 0, align = sizeof (uint8_t), mask; 10192 uint16_t format = 0; 10193 dtrace_recdesc_t *rec; 10194 dtrace_state_t *state = ecb->dte_state; 10195 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10196 uint64_t arg = desc->dtad_arg; 10197 10198 
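	/*
	 * Translate the action description into a dtrace_action_t and
	 * append it to this ECB: aggregating actions are handed to
	 * dtrace_ecb_aggregation_create(), while every other kind has its
	 * record size determined below (registering a format string via
	 * dtrace_format_add() where needed) before being linked after
	 * dte_action_last.
	 */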
ASSERT(MUTEX_HELD(&dtrace_lock)); 10199 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10200 10201 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10202 /* 10203 * If this is an aggregating action, there must be neither 10204 * a speculate nor a commit on the action chain. 10205 */ 10206 dtrace_action_t *act; 10207 10208 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10209 if (act->dta_kind == DTRACEACT_COMMIT) 10210 return (EINVAL); 10211 10212 if (act->dta_kind == DTRACEACT_SPECULATE) 10213 return (EINVAL); 10214 } 10215 10216 action = dtrace_ecb_aggregation_create(ecb, desc); 10217 10218 if (action == NULL) 10219 return (EINVAL); 10220 } else { 10221 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10222 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10223 dp != NULL && dp->dtdo_destructive)) { 10224 state->dts_destructive = 1; 10225 } 10226 10227 switch (desc->dtad_kind) { 10228 case DTRACEACT_PRINTF: 10229 case DTRACEACT_PRINTA: 10230 case DTRACEACT_SYSTEM: 10231 case DTRACEACT_FREOPEN: 10232 case DTRACEACT_DIFEXPR: 10233 /* 10234 * We know that our arg is a string -- turn it into a 10235 * format. 10236 */ 10237 if (arg == 0) { 10238 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10239 desc->dtad_kind == DTRACEACT_DIFEXPR); 10240 format = 0; 10241 } else { 10242 ASSERT(arg != 0); 10243 #if defined(sun) 10244 ASSERT(arg > KERNELBASE); 10245 #endif 10246 format = dtrace_format_add(state, 10247 (char *)(uintptr_t)arg); 10248 } 10249 10250 /*FALLTHROUGH*/ 10251 case DTRACEACT_LIBACT: 10252 case DTRACEACT_TRACEMEM: 10253 case DTRACEACT_TRACEMEM_DYNSIZE: 10254 if (dp == NULL) 10255 return (EINVAL); 10256 10257 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10258 break; 10259 10260 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10261 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10262 return (EINVAL); 10263 10264 size = opt[DTRACEOPT_STRSIZE]; 10265 } 10266 10267 break; 10268 10269 case DTRACEACT_STACK: 10270 if ((nframes = arg) == 0) { 10271 nframes = opt[DTRACEOPT_STACKFRAMES]; 10272 ASSERT(nframes > 0); 10273 arg = nframes; 10274 } 10275 10276 size = nframes * sizeof (pc_t); 10277 break; 10278 10279 case DTRACEACT_JSTACK: 10280 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10281 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10282 10283 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10284 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10285 10286 arg = DTRACE_USTACK_ARG(nframes, strsize); 10287 10288 /*FALLTHROUGH*/ 10289 case DTRACEACT_USTACK: 10290 if (desc->dtad_kind != DTRACEACT_JSTACK && 10291 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10292 strsize = DTRACE_USTACK_STRSIZE(arg); 10293 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10294 ASSERT(nframes > 0); 10295 arg = DTRACE_USTACK_ARG(nframes, strsize); 10296 } 10297 10298 /* 10299 * Save a slot for the pid. 
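 * For example, with nframes == 20 and strsize == 0 (plausible values;
 * the actual ones come from the consumer's options), the record below is
 * (20 + 1) * sizeof (uint64_t) == 168 bytes, already a multiple of the
 * pointer size after P2ROUNDUP().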
10300 */ 10301 size = (nframes + 1) * sizeof (uint64_t); 10302 size += DTRACE_USTACK_STRSIZE(arg); 10303 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10304 10305 break; 10306 10307 case DTRACEACT_SYM: 10308 case DTRACEACT_MOD: 10309 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10310 sizeof (uint64_t)) || 10311 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10312 return (EINVAL); 10313 break; 10314 10315 case DTRACEACT_USYM: 10316 case DTRACEACT_UMOD: 10317 case DTRACEACT_UADDR: 10318 if (dp == NULL || 10319 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10320 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10321 return (EINVAL); 10322 10323 /* 10324 * We have a slot for the pid, plus a slot for the 10325 * argument. To keep things simple (aligned with 10326 * bitness-neutral sizing), we store each as a 64-bit 10327 * quantity. 10328 */ 10329 size = 2 * sizeof (uint64_t); 10330 break; 10331 10332 case DTRACEACT_STOP: 10333 case DTRACEACT_BREAKPOINT: 10334 case DTRACEACT_PANIC: 10335 break; 10336 10337 case DTRACEACT_CHILL: 10338 case DTRACEACT_DISCARD: 10339 case DTRACEACT_RAISE: 10340 if (dp == NULL) 10341 return (EINVAL); 10342 break; 10343 10344 case DTRACEACT_EXIT: 10345 if (dp == NULL || 10346 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10347 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10348 return (EINVAL); 10349 break; 10350 10351 case DTRACEACT_SPECULATE: 10352 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10353 return (EINVAL); 10354 10355 if (dp == NULL) 10356 return (EINVAL); 10357 10358 state->dts_speculates = 1; 10359 break; 10360 10361 case DTRACEACT_PRINTM: 10362 size = dp->dtdo_rtype.dtdt_size; 10363 break; 10364 10365 case DTRACEACT_PRINTT: 10366 size = dp->dtdo_rtype.dtdt_size; 10367 break; 10368 10369 case DTRACEACT_COMMIT: { 10370 dtrace_action_t *act = ecb->dte_action; 10371 10372 for (; act != NULL; act = act->dta_next) { 10373 if (act->dta_kind == DTRACEACT_COMMIT) 10374 return (EINVAL); 10375 } 10376 10377 if (dp == NULL) 10378 return (EINVAL); 10379 break; 10380 } 10381 10382 default: 10383 return (EINVAL); 10384 } 10385 10386 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10387 /* 10388 * If this is a data-storing action or a speculate, 10389 * we must be sure that there isn't a commit on the 10390 * action chain. 
10391 */ 10392 dtrace_action_t *act = ecb->dte_action; 10393 10394 for (; act != NULL; act = act->dta_next) { 10395 if (act->dta_kind == DTRACEACT_COMMIT) 10396 return (EINVAL); 10397 } 10398 } 10399 10400 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10401 action->dta_rec.dtrd_size = size; 10402 } 10403 10404 action->dta_refcnt = 1; 10405 rec = &action->dta_rec; 10406 size = rec->dtrd_size; 10407 10408 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10409 if (!(size & mask)) { 10410 align = mask + 1; 10411 break; 10412 } 10413 } 10414 10415 action->dta_kind = desc->dtad_kind; 10416 10417 if ((action->dta_difo = dp) != NULL) 10418 dtrace_difo_hold(dp); 10419 10420 rec->dtrd_action = action->dta_kind; 10421 rec->dtrd_arg = arg; 10422 rec->dtrd_uarg = desc->dtad_uarg; 10423 rec->dtrd_alignment = (uint16_t)align; 10424 rec->dtrd_format = format; 10425 10426 if ((last = ecb->dte_action_last) != NULL) { 10427 ASSERT(ecb->dte_action != NULL); 10428 action->dta_prev = last; 10429 last->dta_next = action; 10430 } else { 10431 ASSERT(ecb->dte_action == NULL); 10432 ecb->dte_action = action; 10433 } 10434 10435 ecb->dte_action_last = action; 10436 10437 return (0); 10438 } 10439 10440 static void 10441 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10442 { 10443 dtrace_action_t *act = ecb->dte_action, *next; 10444 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10445 dtrace_difo_t *dp; 10446 uint16_t format; 10447 10448 if (act != NULL && act->dta_refcnt > 1) { 10449 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10450 act->dta_refcnt--; 10451 } else { 10452 for (; act != NULL; act = next) { 10453 next = act->dta_next; 10454 ASSERT(next != NULL || act == ecb->dte_action_last); 10455 ASSERT(act->dta_refcnt == 1); 10456 10457 if ((format = act->dta_rec.dtrd_format) != 0) 10458 dtrace_format_remove(ecb->dte_state, format); 10459 10460 if ((dp = act->dta_difo) != NULL) 10461 dtrace_difo_release(dp, vstate); 10462 10463 if (DTRACEACT_ISAGG(act->dta_kind)) { 10464 dtrace_ecb_aggregation_destroy(ecb, act); 10465 } else { 10466 kmem_free(act, sizeof (dtrace_action_t)); 10467 } 10468 } 10469 } 10470 10471 ecb->dte_action = NULL; 10472 ecb->dte_action_last = NULL; 10473 ecb->dte_size = sizeof (dtrace_epid_t); 10474 } 10475 10476 static void 10477 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10478 { 10479 /* 10480 * We disable the ECB by removing it from its probe. 10481 */ 10482 dtrace_ecb_t *pecb, *prev = NULL; 10483 dtrace_probe_t *probe = ecb->dte_probe; 10484 10485 ASSERT(MUTEX_HELD(&dtrace_lock)); 10486 10487 if (probe == NULL) { 10488 /* 10489 * This is the NULL probe; there is nothing to disable. 10490 */ 10491 return; 10492 } 10493 10494 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10495 if (pecb == ecb) 10496 break; 10497 prev = pecb; 10498 } 10499 10500 ASSERT(pecb != NULL); 10501 10502 if (prev == NULL) { 10503 probe->dtpr_ecb = ecb->dte_next; 10504 } else { 10505 prev->dte_next = ecb->dte_next; 10506 } 10507 10508 if (ecb == probe->dtpr_ecb_last) { 10509 ASSERT(ecb->dte_next == NULL); 10510 probe->dtpr_ecb_last = prev; 10511 } 10512 10513 /* 10514 * The ECB has been disconnected from the probe; now sync to assure 10515 * that all CPUs have seen the change before returning. 
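	 *
	 * (dtrace_sync() cross calls every CPU; once it returns, no CPU
	 * can still be executing dtrace_probe() against the old ECB
	 * chain.)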
10516 */ 10517 dtrace_sync(); 10518 10519 if (probe->dtpr_ecb == NULL) { 10520 /* 10521 * That was the last ECB on the probe; clear the predicate 10522 * cache ID for the probe, disable it and sync one more time 10523 * to assure that we'll never hit it again. 10524 */ 10525 dtrace_provider_t *prov = probe->dtpr_provider; 10526 10527 ASSERT(ecb->dte_next == NULL); 10528 ASSERT(probe->dtpr_ecb_last == NULL); 10529 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10530 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10531 probe->dtpr_id, probe->dtpr_arg); 10532 dtrace_sync(); 10533 } else { 10534 /* 10535 * There is at least one ECB remaining on the probe. If there 10536 * is _exactly_ one, set the probe's predicate cache ID to be 10537 * the predicate cache ID of the remaining ECB. 10538 */ 10539 ASSERT(probe->dtpr_ecb_last != NULL); 10540 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10541 10542 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10543 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10544 10545 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10546 10547 if (p != NULL) 10548 probe->dtpr_predcache = p->dtp_cacheid; 10549 } 10550 10551 ecb->dte_next = NULL; 10552 } 10553 } 10554 10555 static void 10556 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10557 { 10558 dtrace_state_t *state = ecb->dte_state; 10559 dtrace_vstate_t *vstate = &state->dts_vstate; 10560 dtrace_predicate_t *pred; 10561 dtrace_epid_t epid = ecb->dte_epid; 10562 10563 ASSERT(MUTEX_HELD(&dtrace_lock)); 10564 ASSERT(ecb->dte_next == NULL); 10565 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10566 10567 if ((pred = ecb->dte_predicate) != NULL) 10568 dtrace_predicate_release(pred, vstate); 10569 10570 dtrace_ecb_action_remove(ecb); 10571 10572 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10573 state->dts_ecbs[epid - 1] = NULL; 10574 10575 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10576 } 10577 10578 static dtrace_ecb_t * 10579 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10580 dtrace_enabling_t *enab) 10581 { 10582 dtrace_ecb_t *ecb; 10583 dtrace_predicate_t *pred; 10584 dtrace_actdesc_t *act; 10585 dtrace_provider_t *prov; 10586 dtrace_ecbdesc_t *desc = enab->dten_current; 10587 10588 ASSERT(MUTEX_HELD(&dtrace_lock)); 10589 ASSERT(state != NULL); 10590 10591 ecb = dtrace_ecb_add(state, probe); 10592 ecb->dte_uarg = desc->dted_uarg; 10593 10594 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10595 dtrace_predicate_hold(pred); 10596 ecb->dte_predicate = pred; 10597 } 10598 10599 if (probe != NULL) { 10600 /* 10601 * If the provider shows more leg than the consumer is old 10602 * enough to see, we need to enable the appropriate implicit 10603 * predicate bits to prevent the ecb from activating at 10604 * revealing times. 10605 * 10606 * Providers specifying DTRACE_PRIV_USER at register time 10607 * are stating that they need the /proc-style privilege 10608 * model to be enforced, and this is what DTRACE_COND_OWNER 10609 * and DTRACE_COND_ZONEOWNER will then do at probe time. 
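		 *
		 * For example, a consumer that lacks DTRACE_CRV_ALLPROC
		 * visibility and enables a probe from such a provider
		 * gets DTRACE_COND_OWNER set below, so the probe will
		 * only fire in processes owned by that consumer's
		 * credential.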
10610 */ 10611 prov = probe->dtpr_provider; 10612 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10613 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10614 ecb->dte_cond |= DTRACE_COND_OWNER; 10615 10616 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10617 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10618 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10619 10620 /* 10621 * If the provider shows us kernel innards and the user 10622 * is lacking sufficient privilege, enable the 10623 * DTRACE_COND_USERMODE implicit predicate. 10624 */ 10625 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10626 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10627 ecb->dte_cond |= DTRACE_COND_USERMODE; 10628 } 10629 10630 if (dtrace_ecb_create_cache != NULL) { 10631 /* 10632 * If we have a cached ecb, we'll use its action list instead 10633 * of creating our own (saving both time and space). 10634 */ 10635 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10636 dtrace_action_t *act = cached->dte_action; 10637 10638 if (act != NULL) { 10639 ASSERT(act->dta_refcnt > 0); 10640 act->dta_refcnt++; 10641 ecb->dte_action = act; 10642 ecb->dte_action_last = cached->dte_action_last; 10643 ecb->dte_needed = cached->dte_needed; 10644 ecb->dte_size = cached->dte_size; 10645 ecb->dte_alignment = cached->dte_alignment; 10646 } 10647 10648 return (ecb); 10649 } 10650 10651 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10652 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10653 dtrace_ecb_destroy(ecb); 10654 return (NULL); 10655 } 10656 } 10657 10658 dtrace_ecb_resize(ecb); 10659 10660 return (dtrace_ecb_create_cache = ecb); 10661 } 10662 10663 static int 10664 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10665 { 10666 dtrace_ecb_t *ecb; 10667 dtrace_enabling_t *enab = arg; 10668 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10669 10670 ASSERT(state != NULL); 10671 10672 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10673 /* 10674 * This probe was created in a generation for which this 10675 * enabling has previously created ECBs; we don't want to 10676 * enable it again, so just kick out. 10677 */ 10678 return (DTRACE_MATCH_NEXT); 10679 } 10680 10681 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10682 return (DTRACE_MATCH_DONE); 10683 10684 dtrace_ecb_enable(ecb); 10685 return (DTRACE_MATCH_NEXT); 10686 } 10687 10688 static dtrace_ecb_t * 10689 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10690 { 10691 dtrace_ecb_t *ecb; 10692 10693 ASSERT(MUTEX_HELD(&dtrace_lock)); 10694 10695 if (id == 0 || id > state->dts_necbs) 10696 return (NULL); 10697 10698 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10699 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10700 10701 return (state->dts_ecbs[id - 1]); 10702 } 10703 10704 static dtrace_aggregation_t * 10705 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10706 { 10707 dtrace_aggregation_t *agg; 10708 10709 ASSERT(MUTEX_HELD(&dtrace_lock)); 10710 10711 if (id == 0 || id > state->dts_naggregations) 10712 return (NULL); 10713 10714 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10715 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10716 agg->dtag_id == id); 10717 10718 return (state->dts_aggregations[id - 1]); 10719 } 10720 10721 /* 10722 * DTrace Buffer Functions 10723 * 10724 * The following functions manipulate DTrace buffers. 
Most of these functions 10725 * are called in the context of establishing or processing consumer state; 10726 * exceptions are explicitly noted. 10727 */ 10728 10729 /* 10730 * Note: called from cross call context. This function switches the two 10731 * buffers on a given CPU. The atomicity of this operation is assured by 10732 * disabling interrupts while the actual switch takes place; the disabling of 10733 * interrupts serializes the execution with any execution of dtrace_probe() on 10734 * the same CPU. 10735 */ 10736 static void 10737 dtrace_buffer_switch(dtrace_buffer_t *buf) 10738 { 10739 caddr_t tomax = buf->dtb_tomax; 10740 caddr_t xamot = buf->dtb_xamot; 10741 dtrace_icookie_t cookie; 10742 hrtime_t now = dtrace_gethrtime(); 10743 10744 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10745 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10746 10747 cookie = dtrace_interrupt_disable(); 10748 buf->dtb_tomax = xamot; 10749 buf->dtb_xamot = tomax; 10750 buf->dtb_xamot_drops = buf->dtb_drops; 10751 buf->dtb_xamot_offset = buf->dtb_offset; 10752 buf->dtb_xamot_errors = buf->dtb_errors; 10753 buf->dtb_xamot_flags = buf->dtb_flags; 10754 buf->dtb_offset = 0; 10755 buf->dtb_drops = 0; 10756 buf->dtb_errors = 0; 10757 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10758 buf->dtb_interval = now - buf->dtb_switched; 10759 buf->dtb_switched = now; 10760 dtrace_interrupt_enable(cookie); 10761 } 10762 10763 /* 10764 * Note: called from cross call context. This function activates a buffer 10765 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10766 * is guaranteed by the disabling of interrupts. 10767 */ 10768 static void 10769 dtrace_buffer_activate(dtrace_state_t *state) 10770 { 10771 dtrace_buffer_t *buf; 10772 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10773 10774 buf = &state->dts_buffer[curcpu]; 10775 10776 if (buf->dtb_tomax != NULL) { 10777 /* 10778 * We might like to assert that the buffer is marked inactive, 10779 * but this isn't necessarily true: the buffer for the CPU 10780 * that processes the BEGIN probe has its buffer activated 10781 * manually. In this case, we take the (harmless) action 10782 * re-clearing the bit INACTIVE bit. 10783 */ 10784 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10785 } 10786 10787 dtrace_interrupt_enable(cookie); 10788 } 10789 10790 static int 10791 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10792 processorid_t cpu) 10793 { 10794 #if defined(sun) 10795 cpu_t *cp; 10796 #endif 10797 dtrace_buffer_t *buf; 10798 10799 #if defined(sun) 10800 ASSERT(MUTEX_HELD(&cpu_lock)); 10801 ASSERT(MUTEX_HELD(&dtrace_lock)); 10802 10803 if (size > dtrace_nonroot_maxsize && 10804 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10805 return (EFBIG); 10806 10807 cp = cpu_list; 10808 10809 do { 10810 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10811 continue; 10812 10813 buf = &bufs[cp->cpu_id]; 10814 10815 /* 10816 * If there is already a buffer allocated for this CPU, it 10817 * is only possible that this is a DR event. 
In this case, 10818 */ 10819 if (buf->dtb_tomax != NULL) { 10820 ASSERT(buf->dtb_size == size); 10821 continue; 10822 } 10823 10824 ASSERT(buf->dtb_xamot == NULL); 10825 10826 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10827 goto err; 10828 10829 buf->dtb_size = size; 10830 buf->dtb_flags = flags; 10831 buf->dtb_offset = 0; 10832 buf->dtb_drops = 0; 10833 10834 if (flags & DTRACEBUF_NOSWITCH) 10835 continue; 10836 10837 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10838 goto err; 10839 } while ((cp = cp->cpu_next) != cpu_list); 10840 10841 return (0); 10842 10843 err: 10844 cp = cpu_list; 10845 10846 do { 10847 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10848 continue; 10849 10850 buf = &bufs[cp->cpu_id]; 10851 10852 if (buf->dtb_xamot != NULL) { 10853 ASSERT(buf->dtb_tomax != NULL); 10854 ASSERT(buf->dtb_size == size); 10855 kmem_free(buf->dtb_xamot, size); 10856 } 10857 10858 if (buf->dtb_tomax != NULL) { 10859 ASSERT(buf->dtb_size == size); 10860 kmem_free(buf->dtb_tomax, size); 10861 } 10862 10863 buf->dtb_tomax = NULL; 10864 buf->dtb_xamot = NULL; 10865 buf->dtb_size = 0; 10866 } while ((cp = cp->cpu_next) != cpu_list); 10867 10868 return (ENOMEM); 10869 #else 10870 int i; 10871 10872 #if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 10873 /* 10874 * FreeBSD isn't good at limiting the amount of memory we 10875 * ask to malloc, so let's place a limit here before trying 10876 * to do something that might well end in tears at bedtime. 10877 */ 10878 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10879 return(ENOMEM); 10880 #endif 10881 10882 ASSERT(MUTEX_HELD(&dtrace_lock)); 10883 CPU_FOREACH(i) { 10884 if (cpu != DTRACE_CPUALL && cpu != i) 10885 continue; 10886 10887 buf = &bufs[i]; 10888 10889 /* 10890 * If there is already a buffer allocated for this CPU, it 10891 * is only possible that this is a DR event. In this case, 10892 * the buffer size must match our specified size. 10893 */ 10894 if (buf->dtb_tomax != NULL) { 10895 ASSERT(buf->dtb_size == size); 10896 continue; 10897 } 10898 10899 ASSERT(buf->dtb_xamot == NULL); 10900 10901 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10902 goto err; 10903 10904 buf->dtb_size = size; 10905 buf->dtb_flags = flags; 10906 buf->dtb_offset = 0; 10907 buf->dtb_drops = 0; 10908 10909 if (flags & DTRACEBUF_NOSWITCH) 10910 continue; 10911 10912 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10913 goto err; 10914 } 10915 10916 return (0); 10917 10918 err: 10919 /* 10920 * Error allocating memory, so free the buffers that were 10921 * allocated before the failed allocation. 10922 */ 10923 CPU_FOREACH(i) { 10924 if (cpu != DTRACE_CPUALL && cpu != i) 10925 continue; 10926 10927 buf = &bufs[i]; 10928 10929 if (buf->dtb_xamot != NULL) { 10930 ASSERT(buf->dtb_tomax != NULL); 10931 ASSERT(buf->dtb_size == size); 10932 kmem_free(buf->dtb_xamot, size); 10933 } 10934 10935 if (buf->dtb_tomax != NULL) { 10936 ASSERT(buf->dtb_size == size); 10937 kmem_free(buf->dtb_tomax, size); 10938 } 10939 10940 buf->dtb_tomax = NULL; 10941 buf->dtb_xamot = NULL; 10942 buf->dtb_size = 0; 10943 10944 } 10945 10946 return (ENOMEM); 10947 #endif 10948 } 10949 10950 /* 10951 * Note: called from probe context. This function just increments the drop 10952 * count on a buffer. It has been made a function to allow for the 10953 * possibility of understanding the source of mysterious drop counts. 
(A 10954 * problem for which one may be particularly disappointed that DTrace cannot 10955 * be used to understand DTrace.) 10956 */ 10957 static void 10958 dtrace_buffer_drop(dtrace_buffer_t *buf) 10959 { 10960 buf->dtb_drops++; 10961 } 10962 10963 /* 10964 * Note: called from probe context. This function is called to reserve space 10965 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10966 * mstate. Returns the new offset in the buffer, or a negative value if an 10967 * error has occurred. 10968 */ 10969 static intptr_t 10970 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10971 dtrace_state_t *state, dtrace_mstate_t *mstate) 10972 { 10973 intptr_t offs = buf->dtb_offset, soffs; 10974 intptr_t woffs; 10975 caddr_t tomax; 10976 size_t total; 10977 10978 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10979 return (-1); 10980 10981 if ((tomax = buf->dtb_tomax) == NULL) { 10982 dtrace_buffer_drop(buf); 10983 return (-1); 10984 } 10985 10986 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10987 while (offs & (align - 1)) { 10988 /* 10989 * Assert that our alignment is off by a number which 10990 * is itself sizeof (uint32_t) aligned. 10991 */ 10992 ASSERT(!((align - (offs & (align - 1))) & 10993 (sizeof (uint32_t) - 1))); 10994 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10995 offs += sizeof (uint32_t); 10996 } 10997 10998 if ((soffs = offs + needed) > buf->dtb_size) { 10999 dtrace_buffer_drop(buf); 11000 return (-1); 11001 } 11002 11003 if (mstate == NULL) 11004 return (offs); 11005 11006 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 11007 mstate->dtms_scratch_size = buf->dtb_size - soffs; 11008 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11009 11010 return (offs); 11011 } 11012 11013 if (buf->dtb_flags & DTRACEBUF_FILL) { 11014 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11015 (buf->dtb_flags & DTRACEBUF_FULL)) 11016 return (-1); 11017 goto out; 11018 } 11019 11020 total = needed + (offs & (align - 1)); 11021 11022 /* 11023 * For a ring buffer, life is quite a bit more complicated. Before 11024 * we can store any padding, we need to adjust our wrapping offset. 11025 * (If we've never before wrapped or we're not about to, no adjustment 11026 * is required.) 11027 */ 11028 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11029 offs + total > buf->dtb_size) { 11030 woffs = buf->dtb_xamot_offset; 11031 11032 if (offs + total > buf->dtb_size) { 11033 /* 11034 * We can't fit in the end of the buffer. First, a 11035 * sanity check that we can fit in the buffer at all. 11036 */ 11037 if (total > buf->dtb_size) { 11038 dtrace_buffer_drop(buf); 11039 return (-1); 11040 } 11041 11042 /* 11043 * We're going to be storing at the top of the buffer, 11044 * so now we need to deal with the wrapped offset. We 11045 * only reset our wrapped offset to 0 if it is 11046 * currently greater than the current offset. If it 11047 * is less than the current offset, it is because a 11048 * previous allocation induced a wrap -- but the 11049 * allocation didn't subsequently take the space due 11050 * to an error or false predicate evaluation. In this 11051 * case, we'll just leave the wrapped offset alone: if 11052 * the wrapped offset hasn't been advanced far enough 11053 * for this allocation, it will be adjusted in the 11054 * lower loop. 
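			 *
			 * To make this concrete (illustrative numbers
			 * only): with a 64-byte ring buffer, offs = 48 and
			 * a 24-byte reservation, we can't fit at the end;
			 * the tail of the buffer is zeroed, offs is reset
			 * to 0, DTRACEBUF_WRAPPED is set, and the loop
			 * further down retires old records at the wrapped
			 * offset until at least 24 bytes are free ahead of
			 * the new offset.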
11055 */ 11056 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11057 if (woffs >= offs) 11058 woffs = 0; 11059 } else { 11060 woffs = 0; 11061 } 11062 11063 /* 11064 * Now we know that we're going to be storing to the 11065 * top of the buffer and that there is room for us 11066 * there. We need to clear the buffer from the current 11067 * offset to the end (there may be old gunk there). 11068 */ 11069 while (offs < buf->dtb_size) 11070 tomax[offs++] = 0; 11071 11072 /* 11073 * We need to set our offset to zero. And because we 11074 * are wrapping, we need to set the bit indicating as 11075 * much. We can also adjust our needed space back 11076 * down to the space required by the ECB -- we know 11077 * that the top of the buffer is aligned. 11078 */ 11079 offs = 0; 11080 total = needed; 11081 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11082 } else { 11083 /* 11084 * There is room for us in the buffer, so we simply 11085 * need to check the wrapped offset. 11086 */ 11087 if (woffs < offs) { 11088 /* 11089 * The wrapped offset is less than the offset. 11090 * This can happen if we allocated buffer space 11091 * that induced a wrap, but then we didn't 11092 * subsequently take the space due to an error 11093 * or false predicate evaluation. This is 11094 * okay; we know that _this_ allocation isn't 11095 * going to induce a wrap. We still can't 11096 * reset the wrapped offset to be zero, 11097 * however: the space may have been trashed in 11098 * the previous failed probe attempt. But at 11099 * least the wrapped offset doesn't need to 11100 * be adjusted at all... 11101 */ 11102 goto out; 11103 } 11104 } 11105 11106 while (offs + total > woffs) { 11107 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11108 size_t size; 11109 11110 if (epid == DTRACE_EPIDNONE) { 11111 size = sizeof (uint32_t); 11112 } else { 11113 ASSERT(epid <= state->dts_necbs); 11114 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11115 11116 size = state->dts_ecbs[epid - 1]->dte_size; 11117 } 11118 11119 ASSERT(woffs + size <= buf->dtb_size); 11120 ASSERT(size != 0); 11121 11122 if (woffs + size == buf->dtb_size) { 11123 /* 11124 * We've reached the end of the buffer; we want 11125 * to set the wrapped offset to 0 and break 11126 * out. However, if the offs is 0, then we're 11127 * in a strange edge-condition: the amount of 11128 * space that we want to reserve plus the size 11129 * of the record that we're overwriting is 11130 * greater than the size of the buffer. This 11131 * is problematic because if we reserve the 11132 * space but subsequently don't consume it (due 11133 * to a failed predicate or error) the wrapped 11134 * offset will be 0 -- yet the EPID at offset 0 11135 * will not be committed. This situation is 11136 * relatively easy to deal with: if we're in 11137 * this case, the buffer is indistinguishable 11138 * from one that hasn't wrapped; we need only 11139 * finish the job by clearing the wrapped bit, 11140 * explicitly setting the offset to be 0, and 11141 * zero'ing out the old data in the buffer. 11142 */ 11143 if (offs == 0) { 11144 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11145 buf->dtb_offset = 0; 11146 woffs = total; 11147 11148 while (woffs < buf->dtb_size) 11149 tomax[woffs++] = 0; 11150 } 11151 11152 woffs = 0; 11153 break; 11154 } 11155 11156 woffs += size; 11157 } 11158 11159 /* 11160 * We have a wrapped offset. It may be that the wrapped offset 11161 * has become zero -- that's okay. 
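		 *
		 * (Ring buffers are never switched, so dtb_xamot_offset is
		 * free to be reused here to remember the wrapped offset.)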
11162 */ 11163 buf->dtb_xamot_offset = woffs; 11164 } 11165 11166 out: 11167 /* 11168 * Now we can plow the buffer with any necessary padding. 11169 */ 11170 while (offs & (align - 1)) { 11171 /* 11172 * Assert that our alignment is off by a number which 11173 * is itself sizeof (uint32_t) aligned. 11174 */ 11175 ASSERT(!((align - (offs & (align - 1))) & 11176 (sizeof (uint32_t) - 1))); 11177 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11178 offs += sizeof (uint32_t); 11179 } 11180 11181 if (buf->dtb_flags & DTRACEBUF_FILL) { 11182 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11183 buf->dtb_flags |= DTRACEBUF_FULL; 11184 return (-1); 11185 } 11186 } 11187 11188 if (mstate == NULL) 11189 return (offs); 11190 11191 /* 11192 * For ring buffers and fill buffers, the scratch space is always 11193 * the inactive buffer. 11194 */ 11195 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11196 mstate->dtms_scratch_size = buf->dtb_size; 11197 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11198 11199 return (offs); 11200 } 11201 11202 static void 11203 dtrace_buffer_polish(dtrace_buffer_t *buf) 11204 { 11205 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11206 ASSERT(MUTEX_HELD(&dtrace_lock)); 11207 11208 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11209 return; 11210 11211 /* 11212 * We need to polish the ring buffer. There are three cases: 11213 * 11214 * - The first (and presumably most common) is that there is no gap 11215 * between the buffer offset and the wrapped offset. In this case, 11216 * there is nothing in the buffer that isn't valid data; we can 11217 * mark the buffer as polished and return. 11218 * 11219 * - The second (less common than the first but still more common 11220 * than the third) is that there is a gap between the buffer offset 11221 * and the wrapped offset, and the wrapped offset is larger than the 11222 * buffer offset. This can happen because of an alignment issue, or 11223 * can happen because of a call to dtrace_buffer_reserve() that 11224 * didn't subsequently consume the buffer space. In this case, 11225 * we need to zero the data from the buffer offset to the wrapped 11226 * offset. 11227 * 11228 * - The third (and least common) is that there is a gap between the 11229 * buffer offset and the wrapped offset, but the wrapped offset is 11230 * _less_ than the buffer offset. This can only happen because a 11231 * call to dtrace_buffer_reserve() induced a wrap, but the space 11232 * was not subsequently consumed. In this case, we need to zero the 11233 * space from the offset to the end of the buffer _and_ from the 11234 * top of the buffer to the wrapped offset. 11235 */ 11236 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11237 bzero(buf->dtb_tomax + buf->dtb_offset, 11238 buf->dtb_xamot_offset - buf->dtb_offset); 11239 } 11240 11241 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11242 bzero(buf->dtb_tomax + buf->dtb_offset, 11243 buf->dtb_size - buf->dtb_offset); 11244 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11245 } 11246 } 11247 11248 /* 11249 * This routine determines if data generated at the specified time has likely 11250 * been entirely consumed at user-level. This routine is called to determine 11251 * if an ECB on a defunct probe (but for an active enabling) can be safely 11252 * disabled and destroyed. 
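 *
 * (In practice this means that every per-CPU principal buffer must have been
 * switched twice since the time in question -- that is what the comparison
 * against dtb_switched and dtb_interval below checks -- and that ring buffers
 * are never considered consumed.)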
11253 */ 11254 static int 11255 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11256 { 11257 int i; 11258 11259 for (i = 0; i < NCPU; i++) { 11260 dtrace_buffer_t *buf = &bufs[i]; 11261 11262 if (buf->dtb_size == 0) 11263 continue; 11264 11265 if (buf->dtb_flags & DTRACEBUF_RING) 11266 return (0); 11267 11268 if (!buf->dtb_switched && buf->dtb_offset != 0) 11269 return (0); 11270 11271 if (buf->dtb_switched - buf->dtb_interval < when) 11272 return (0); 11273 } 11274 11275 return (1); 11276 } 11277 11278 static void 11279 dtrace_buffer_free(dtrace_buffer_t *bufs) 11280 { 11281 int i; 11282 11283 for (i = 0; i < NCPU; i++) { 11284 dtrace_buffer_t *buf = &bufs[i]; 11285 11286 if (buf->dtb_tomax == NULL) { 11287 ASSERT(buf->dtb_xamot == NULL); 11288 ASSERT(buf->dtb_size == 0); 11289 continue; 11290 } 11291 11292 if (buf->dtb_xamot != NULL) { 11293 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11294 kmem_free(buf->dtb_xamot, buf->dtb_size); 11295 } 11296 11297 kmem_free(buf->dtb_tomax, buf->dtb_size); 11298 buf->dtb_size = 0; 11299 buf->dtb_tomax = NULL; 11300 buf->dtb_xamot = NULL; 11301 } 11302 } 11303 11304 /* 11305 * DTrace Enabling Functions 11306 */ 11307 static dtrace_enabling_t * 11308 dtrace_enabling_create(dtrace_vstate_t *vstate) 11309 { 11310 dtrace_enabling_t *enab; 11311 11312 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11313 enab->dten_vstate = vstate; 11314 11315 return (enab); 11316 } 11317 11318 static void 11319 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11320 { 11321 dtrace_ecbdesc_t **ndesc; 11322 size_t osize, nsize; 11323 11324 /* 11325 * We can't add to enablings after we've enabled them, or after we've 11326 * retained them. 11327 */ 11328 ASSERT(enab->dten_probegen == 0); 11329 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11330 11331 if (enab->dten_ndesc < enab->dten_maxdesc) { 11332 enab->dten_desc[enab->dten_ndesc++] = ecb; 11333 return; 11334 } 11335 11336 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11337 11338 if (enab->dten_maxdesc == 0) { 11339 enab->dten_maxdesc = 1; 11340 } else { 11341 enab->dten_maxdesc <<= 1; 11342 } 11343 11344 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11345 11346 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11347 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11348 bcopy(enab->dten_desc, ndesc, osize); 11349 if (enab->dten_desc != NULL) 11350 kmem_free(enab->dten_desc, osize); 11351 11352 enab->dten_desc = ndesc; 11353 enab->dten_desc[enab->dten_ndesc++] = ecb; 11354 } 11355 11356 static void 11357 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11358 dtrace_probedesc_t *pd) 11359 { 11360 dtrace_ecbdesc_t *new; 11361 dtrace_predicate_t *pred; 11362 dtrace_actdesc_t *act; 11363 11364 /* 11365 * We're going to create a new ECB description that matches the 11366 * specified ECB in every way, but has the specified probe description. 
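	 *
	 * (The predicate and action descriptions are not copied: the new
	 * description shares them with the original, and their reference
	 * counts are bumped below.)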
11367 */ 11368 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11369 11370 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11371 dtrace_predicate_hold(pred); 11372 11373 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11374 dtrace_actdesc_hold(act); 11375 11376 new->dted_action = ecb->dted_action; 11377 new->dted_pred = ecb->dted_pred; 11378 new->dted_probe = *pd; 11379 new->dted_uarg = ecb->dted_uarg; 11380 11381 dtrace_enabling_add(enab, new); 11382 } 11383 11384 static void 11385 dtrace_enabling_dump(dtrace_enabling_t *enab) 11386 { 11387 int i; 11388 11389 for (i = 0; i < enab->dten_ndesc; i++) { 11390 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11391 11392 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11393 desc->dtpd_provider, desc->dtpd_mod, 11394 desc->dtpd_func, desc->dtpd_name); 11395 } 11396 } 11397 11398 static void 11399 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11400 { 11401 int i; 11402 dtrace_ecbdesc_t *ep; 11403 dtrace_vstate_t *vstate = enab->dten_vstate; 11404 11405 ASSERT(MUTEX_HELD(&dtrace_lock)); 11406 11407 for (i = 0; i < enab->dten_ndesc; i++) { 11408 dtrace_actdesc_t *act, *next; 11409 dtrace_predicate_t *pred; 11410 11411 ep = enab->dten_desc[i]; 11412 11413 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11414 dtrace_predicate_release(pred, vstate); 11415 11416 for (act = ep->dted_action; act != NULL; act = next) { 11417 next = act->dtad_next; 11418 dtrace_actdesc_release(act, vstate); 11419 } 11420 11421 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11422 } 11423 11424 if (enab->dten_desc != NULL) 11425 kmem_free(enab->dten_desc, 11426 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11427 11428 /* 11429 * If this was a retained enabling, decrement the dts_nretained count 11430 * and take it off of the dtrace_retained list. 11431 */ 11432 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11433 dtrace_retained == enab) { 11434 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11435 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11436 enab->dten_vstate->dtvs_state->dts_nretained--; 11437 } 11438 11439 if (enab->dten_prev == NULL) { 11440 if (dtrace_retained == enab) { 11441 dtrace_retained = enab->dten_next; 11442 11443 if (dtrace_retained != NULL) 11444 dtrace_retained->dten_prev = NULL; 11445 } 11446 } else { 11447 ASSERT(enab != dtrace_retained); 11448 ASSERT(dtrace_retained != NULL); 11449 enab->dten_prev->dten_next = enab->dten_next; 11450 } 11451 11452 if (enab->dten_next != NULL) { 11453 ASSERT(dtrace_retained != NULL); 11454 enab->dten_next->dten_prev = enab->dten_prev; 11455 } 11456 11457 kmem_free(enab, sizeof (dtrace_enabling_t)); 11458 } 11459 11460 static int 11461 dtrace_enabling_retain(dtrace_enabling_t *enab) 11462 { 11463 dtrace_state_t *state; 11464 11465 ASSERT(MUTEX_HELD(&dtrace_lock)); 11466 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11467 ASSERT(enab->dten_vstate != NULL); 11468 11469 state = enab->dten_vstate->dtvs_state; 11470 ASSERT(state != NULL); 11471 11472 /* 11473 * We only allow each state to retain dtrace_retain_max enablings. 
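	 *
	 * (dtrace_retain_max is a tunable that defaults to 100; exceeding
	 * it yields the ENOSPC below rather than unbounded kernel memory
	 * consumption.)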
11474 */ 11475 if (state->dts_nretained >= dtrace_retain_max) 11476 return (ENOSPC); 11477 11478 state->dts_nretained++; 11479 11480 if (dtrace_retained == NULL) { 11481 dtrace_retained = enab; 11482 return (0); 11483 } 11484 11485 enab->dten_next = dtrace_retained; 11486 dtrace_retained->dten_prev = enab; 11487 dtrace_retained = enab; 11488 11489 return (0); 11490 } 11491 11492 static int 11493 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11494 dtrace_probedesc_t *create) 11495 { 11496 dtrace_enabling_t *new, *enab; 11497 int found = 0, err = ENOENT; 11498 11499 ASSERT(MUTEX_HELD(&dtrace_lock)); 11500 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11501 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11502 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11503 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11504 11505 new = dtrace_enabling_create(&state->dts_vstate); 11506 11507 /* 11508 * Iterate over all retained enablings, looking for enablings that 11509 * match the specified state. 11510 */ 11511 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11512 int i; 11513 11514 /* 11515 * dtvs_state can only be NULL for helper enablings -- and 11516 * helper enablings can't be retained. 11517 */ 11518 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11519 11520 if (enab->dten_vstate->dtvs_state != state) 11521 continue; 11522 11523 /* 11524 * Now iterate over each probe description; we're looking for 11525 * an exact match to the specified probe description. 11526 */ 11527 for (i = 0; i < enab->dten_ndesc; i++) { 11528 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11529 dtrace_probedesc_t *pd = &ep->dted_probe; 11530 11531 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11532 continue; 11533 11534 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11535 continue; 11536 11537 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11538 continue; 11539 11540 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11541 continue; 11542 11543 /* 11544 * We have a winning probe! Add it to our growing 11545 * enabling. 11546 */ 11547 found = 1; 11548 dtrace_enabling_addlike(new, ep, create); 11549 } 11550 } 11551 11552 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11553 dtrace_enabling_destroy(new); 11554 return (err); 11555 } 11556 11557 return (0); 11558 } 11559 11560 static void 11561 dtrace_enabling_retract(dtrace_state_t *state) 11562 { 11563 dtrace_enabling_t *enab, *next; 11564 11565 ASSERT(MUTEX_HELD(&dtrace_lock)); 11566 11567 /* 11568 * Iterate over all retained enablings, destroy the enablings retained 11569 * for the specified state. 11570 */ 11571 for (enab = dtrace_retained; enab != NULL; enab = next) { 11572 next = enab->dten_next; 11573 11574 /* 11575 * dtvs_state can only be NULL for helper enablings -- and 11576 * helper enablings can't be retained. 
11577 */ 11578 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11579 11580 if (enab->dten_vstate->dtvs_state == state) { 11581 ASSERT(state->dts_nretained > 0); 11582 dtrace_enabling_destroy(enab); 11583 } 11584 } 11585 11586 ASSERT(state->dts_nretained == 0); 11587 } 11588 11589 static int 11590 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11591 { 11592 int i = 0; 11593 int matched = 0; 11594 11595 ASSERT(MUTEX_HELD(&cpu_lock)); 11596 ASSERT(MUTEX_HELD(&dtrace_lock)); 11597 11598 for (i = 0; i < enab->dten_ndesc; i++) { 11599 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11600 11601 enab->dten_current = ep; 11602 enab->dten_error = 0; 11603 11604 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11605 11606 if (enab->dten_error != 0) { 11607 /* 11608 * If we get an error half-way through enabling the 11609 * probes, we kick out -- perhaps with some number of 11610 * them enabled. Leaving enabled probes enabled may 11611 * be slightly confusing for user-level, but we expect 11612 * that no one will attempt to actually drive on in 11613 * the face of such errors. If this is an anonymous 11614 * enabling (indicated with a NULL nmatched pointer), 11615 * we cmn_err() a message. We aren't expecting to 11616 * get such an error -- such as it can exist at all, 11617 * it would be a result of corrupted DOF in the driver 11618 * properties. 11619 */ 11620 if (nmatched == NULL) { 11621 cmn_err(CE_WARN, "dtrace_enabling_match() " 11622 "error on %p: %d", (void *)ep, 11623 enab->dten_error); 11624 } 11625 11626 return (enab->dten_error); 11627 } 11628 } 11629 11630 enab->dten_probegen = dtrace_probegen; 11631 if (nmatched != NULL) 11632 *nmatched = matched; 11633 11634 return (0); 11635 } 11636 11637 static void 11638 dtrace_enabling_matchall(void) 11639 { 11640 dtrace_enabling_t *enab; 11641 11642 mutex_enter(&cpu_lock); 11643 mutex_enter(&dtrace_lock); 11644 11645 /* 11646 * Iterate over all retained enablings to see if any probes match 11647 * against them. We only perform this operation on enablings for which 11648 * we have sufficient permissions by virtue of being in the global zone 11649 * or in the same zone as the DTrace client. Because we can be called 11650 * after dtrace_detach() has been called, we cannot assert that there 11651 * are retained enablings. We can safely load from dtrace_retained, 11652 * however: the taskq_destroy() at the end of dtrace_detach() will 11653 * block pending our completion. 11654 */ 11655 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11656 #if defined(sun) 11657 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11658 11659 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11660 #endif 11661 (void) dtrace_enabling_match(enab, NULL); 11662 } 11663 11664 mutex_exit(&dtrace_lock); 11665 mutex_exit(&cpu_lock); 11666 } 11667 11668 /* 11669 * If an enabling is to be enabled without having matched probes (that is, if 11670 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11671 * enabling must be _primed_ by creating an ECB for every ECB description. 11672 * This must be done to assure that we know the number of speculations, the 11673 * number of aggregations, the minimum buffer size needed, etc. before we 11674 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11675 * enabling any probes, we create ECBs for every ECB decription, but with a 11676 * NULL probe -- which is exactly what this function does. 
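 *
 * (Anonymous enablings established from driver properties are the canonical
 * case: their state may be started with dtrace_state_go() before any
 * consumer-driven probe matching has taken place.)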
11677 */ 11678 static void 11679 dtrace_enabling_prime(dtrace_state_t *state) 11680 { 11681 dtrace_enabling_t *enab; 11682 int i; 11683 11684 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11685 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11686 11687 if (enab->dten_vstate->dtvs_state != state) 11688 continue; 11689 11690 /* 11691 * We don't want to prime an enabling more than once, lest 11692 * we allow a malicious user to induce resource exhaustion. 11693 * (The ECBs that result from priming an enabling aren't 11694 * leaked -- but they also aren't deallocated until the 11695 * consumer state is destroyed.) 11696 */ 11697 if (enab->dten_primed) 11698 continue; 11699 11700 for (i = 0; i < enab->dten_ndesc; i++) { 11701 enab->dten_current = enab->dten_desc[i]; 11702 (void) dtrace_probe_enable(NULL, enab); 11703 } 11704 11705 enab->dten_primed = 1; 11706 } 11707 } 11708 11709 /* 11710 * Called to indicate that probes should be provided due to retained 11711 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11712 * must take an initial lap through the enabling calling the dtps_provide() 11713 * entry point explicitly to allow for autocreated probes. 11714 */ 11715 static void 11716 dtrace_enabling_provide(dtrace_provider_t *prv) 11717 { 11718 int i, all = 0; 11719 dtrace_probedesc_t desc; 11720 11721 ASSERT(MUTEX_HELD(&dtrace_lock)); 11722 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11723 11724 if (prv == NULL) { 11725 all = 1; 11726 prv = dtrace_provider; 11727 } 11728 11729 do { 11730 dtrace_enabling_t *enab = dtrace_retained; 11731 void *parg = prv->dtpv_arg; 11732 11733 for (; enab != NULL; enab = enab->dten_next) { 11734 for (i = 0; i < enab->dten_ndesc; i++) { 11735 desc = enab->dten_desc[i]->dted_probe; 11736 mutex_exit(&dtrace_lock); 11737 prv->dtpv_pops.dtps_provide(parg, &desc); 11738 mutex_enter(&dtrace_lock); 11739 } 11740 } 11741 } while (all && (prv = prv->dtpv_next) != NULL); 11742 11743 mutex_exit(&dtrace_lock); 11744 dtrace_probe_provide(NULL, all ? NULL : prv); 11745 mutex_enter(&dtrace_lock); 11746 } 11747 11748 /* 11749 * Called to reap ECBs that are attached to probes from defunct providers. 11750 */ 11751 static void 11752 dtrace_enabling_reap(void) 11753 { 11754 dtrace_provider_t *prov; 11755 dtrace_probe_t *probe; 11756 dtrace_ecb_t *ecb; 11757 hrtime_t when; 11758 int i; 11759 11760 mutex_enter(&cpu_lock); 11761 mutex_enter(&dtrace_lock); 11762 11763 for (i = 0; i < dtrace_nprobes; i++) { 11764 if ((probe = dtrace_probes[i]) == NULL) 11765 continue; 11766 11767 if (probe->dtpr_ecb == NULL) 11768 continue; 11769 11770 prov = probe->dtpr_provider; 11771 11772 if ((when = prov->dtpv_defunct) == 0) 11773 continue; 11774 11775 /* 11776 * We have ECBs on a defunct provider: we want to reap these 11777 * ECBs to allow the provider to unregister. The destruction 11778 * of these ECBs must be done carefully: if we destroy the ECB 11779 * and the consumer later wishes to consume an EPID that 11780 * corresponds to the destroyed ECB (and if the EPID metadata 11781 * has not been previously consumed), the consumer will abort 11782 * processing on the unknown EPID. To reduce (but not, sadly, 11783 * eliminate) the possibility of this, we will only destroy an 11784 * ECB for a defunct provider if, for the state that 11785 * corresponds to the ECB: 11786 * 11787 * (a) There is no speculative tracing (which can effectively 11788 * cache an EPID for an arbitrary amount of time). 
11789 * 11790 * (b) The principal buffers have been switched twice since the 11791 * provider became defunct. 11792 * 11793 * (c) The aggregation buffers are of zero size or have been 11794 * switched twice since the provider became defunct. 11795 * 11796 * We use dts_speculates to determine (a) and call a function 11797 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11798 * that as soon as we've been unable to destroy one of the ECBs 11799 * associated with the probe, we quit trying -- reaping is only 11800 * fruitful in as much as we can destroy all ECBs associated 11801 * with the defunct provider's probes. 11802 */ 11803 while ((ecb = probe->dtpr_ecb) != NULL) { 11804 dtrace_state_t *state = ecb->dte_state; 11805 dtrace_buffer_t *buf = state->dts_buffer; 11806 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11807 11808 if (state->dts_speculates) 11809 break; 11810 11811 if (!dtrace_buffer_consumed(buf, when)) 11812 break; 11813 11814 if (!dtrace_buffer_consumed(aggbuf, when)) 11815 break; 11816 11817 dtrace_ecb_disable(ecb); 11818 ASSERT(probe->dtpr_ecb != ecb); 11819 dtrace_ecb_destroy(ecb); 11820 } 11821 } 11822 11823 mutex_exit(&dtrace_lock); 11824 mutex_exit(&cpu_lock); 11825 } 11826 11827 /* 11828 * DTrace DOF Functions 11829 */ 11830 /*ARGSUSED*/ 11831 static void 11832 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11833 { 11834 if (dtrace_err_verbose) 11835 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11836 11837 #ifdef DTRACE_ERRDEBUG 11838 dtrace_errdebug(str); 11839 #endif 11840 } 11841 11842 /* 11843 * Create DOF out of a currently enabled state. Right now, we only create 11844 * DOF containing the run-time options -- but this could be expanded to create 11845 * complete DOF representing the enabled state. 11846 */ 11847 static dof_hdr_t * 11848 dtrace_dof_create(dtrace_state_t *state) 11849 { 11850 dof_hdr_t *dof; 11851 dof_sec_t *sec; 11852 dof_optdesc_t *opt; 11853 int i, len = sizeof (dof_hdr_t) + 11854 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11855 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11856 11857 ASSERT(MUTEX_HELD(&dtrace_lock)); 11858 11859 dof = kmem_zalloc(len, KM_SLEEP); 11860 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11861 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11862 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11863 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11864 11865 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11866 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11867 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11868 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11869 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11870 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11871 11872 dof->dofh_flags = 0; 11873 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11874 dof->dofh_secsize = sizeof (dof_sec_t); 11875 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11876 dof->dofh_secoff = sizeof (dof_hdr_t); 11877 dof->dofh_loadsz = len; 11878 dof->dofh_filesz = len; 11879 dof->dofh_pad = 0; 11880 11881 /* 11882 * Fill in the option section header... 
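	 *
	 * The resulting image is laid out as: a dof_hdr_t, then a single
	 * dof_sec_t of type DOF_SECT_OPTDESC (padded to an 8-byte
	 * boundary), then DTRACEOPT_MAX dof_optdesc_t entries carrying
	 * the current option values.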
11883 */ 11884 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11885 sec->dofs_type = DOF_SECT_OPTDESC; 11886 sec->dofs_align = sizeof (uint64_t); 11887 sec->dofs_flags = DOF_SECF_LOAD; 11888 sec->dofs_entsize = sizeof (dof_optdesc_t); 11889 11890 opt = (dof_optdesc_t *)((uintptr_t)sec + 11891 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11892 11893 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11894 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11895 11896 for (i = 0; i < DTRACEOPT_MAX; i++) { 11897 opt[i].dofo_option = i; 11898 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11899 opt[i].dofo_value = state->dts_options[i]; 11900 } 11901 11902 return (dof); 11903 } 11904 11905 static dof_hdr_t * 11906 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11907 { 11908 dof_hdr_t hdr, *dof; 11909 11910 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11911 11912 /* 11913 * First, we're going to copyin() the sizeof (dof_hdr_t). 11914 */ 11915 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11916 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11917 *errp = EFAULT; 11918 return (NULL); 11919 } 11920 11921 /* 11922 * Now we'll allocate the entire DOF and copy it in -- provided 11923 * that the length isn't outrageous. 11924 */ 11925 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11926 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11927 *errp = E2BIG; 11928 return (NULL); 11929 } 11930 11931 if (hdr.dofh_loadsz < sizeof (hdr)) { 11932 dtrace_dof_error(&hdr, "invalid load size"); 11933 *errp = EINVAL; 11934 return (NULL); 11935 } 11936 11937 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11938 11939 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11940 kmem_free(dof, hdr.dofh_loadsz); 11941 *errp = EFAULT; 11942 return (NULL); 11943 } 11944 11945 return (dof); 11946 } 11947 11948 #if !defined(sun) 11949 static __inline uchar_t 11950 dtrace_dof_char(char c) { 11951 switch (c) { 11952 case '0': 11953 case '1': 11954 case '2': 11955 case '3': 11956 case '4': 11957 case '5': 11958 case '6': 11959 case '7': 11960 case '8': 11961 case '9': 11962 return (c - '0'); 11963 case 'A': 11964 case 'B': 11965 case 'C': 11966 case 'D': 11967 case 'E': 11968 case 'F': 11969 return (c - 'A' + 10); 11970 case 'a': 11971 case 'b': 11972 case 'c': 11973 case 'd': 11974 case 'e': 11975 case 'f': 11976 return (c - 'a' + 10); 11977 } 11978 /* Should not reach here. */ 11979 return (0); 11980 } 11981 #endif 11982 11983 static dof_hdr_t * 11984 dtrace_dof_property(const char *name) 11985 { 11986 uchar_t *buf; 11987 uint64_t loadsz; 11988 unsigned int len, i; 11989 dof_hdr_t *dof; 11990 11991 #if defined(sun) 11992 /* 11993 * Unfortunately, array of values in .conf files are always (and 11994 * only) interpreted to be integer arrays. We must read our DOF 11995 * as an integer array, and then squeeze it into a byte array. 
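	 *
	 * For example, a DOF image beginning with the usual magic bytes
	 * (0x7f followed by 'D', 'O', 'F') would appear in the .conf
	 * property as the integer array 127, 68, 79, 70, ...; the loop
	 * below narrows each integer back down to a byte.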
11996 */ 11997 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11998 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11999 return (NULL); 12000 12001 for (i = 0; i < len; i++) 12002 buf[i] = (uchar_t)(((int *)buf)[i]); 12003 12004 if (len < sizeof (dof_hdr_t)) { 12005 ddi_prop_free(buf); 12006 dtrace_dof_error(NULL, "truncated header"); 12007 return (NULL); 12008 } 12009 12010 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 12011 ddi_prop_free(buf); 12012 dtrace_dof_error(NULL, "truncated DOF"); 12013 return (NULL); 12014 } 12015 12016 if (loadsz >= dtrace_dof_maxsize) { 12017 ddi_prop_free(buf); 12018 dtrace_dof_error(NULL, "oversized DOF"); 12019 return (NULL); 12020 } 12021 12022 dof = kmem_alloc(loadsz, KM_SLEEP); 12023 bcopy(buf, dof, loadsz); 12024 ddi_prop_free(buf); 12025 #else 12026 char *p; 12027 char *p_env; 12028 12029 if ((p_env = getenv(name)) == NULL) 12030 return (NULL); 12031 12032 len = strlen(p_env) / 2; 12033 12034 buf = kmem_alloc(len, KM_SLEEP); 12035 12036 dof = (dof_hdr_t *) buf; 12037 12038 p = p_env; 12039 12040 for (i = 0; i < len; i++) { 12041 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12042 dtrace_dof_char(p[1]); 12043 p += 2; 12044 } 12045 12046 freeenv(p_env); 12047 12048 if (len < sizeof (dof_hdr_t)) { 12049 kmem_free(buf, 0); 12050 dtrace_dof_error(NULL, "truncated header"); 12051 return (NULL); 12052 } 12053 12054 if (len < (loadsz = dof->dofh_loadsz)) { 12055 kmem_free(buf, 0); 12056 dtrace_dof_error(NULL, "truncated DOF"); 12057 return (NULL); 12058 } 12059 12060 if (loadsz >= dtrace_dof_maxsize) { 12061 kmem_free(buf, 0); 12062 dtrace_dof_error(NULL, "oversized DOF"); 12063 return (NULL); 12064 } 12065 #endif 12066 12067 return (dof); 12068 } 12069 12070 static void 12071 dtrace_dof_destroy(dof_hdr_t *dof) 12072 { 12073 kmem_free(dof, dof->dofh_loadsz); 12074 } 12075 12076 /* 12077 * Return the dof_sec_t pointer corresponding to a given section index. If the 12078 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12079 * a type other than DOF_SECT_NONE is specified, the header is checked against 12080 * this type and NULL is returned if the types do not match. 
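 *
 * (dtrace_dof_probedesc(), below, is a typical caller: it uses this to
 * resolve the DOF_SECT_STRTAB section that a probe description references
 * by index.)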
12081 */ 12082 static dof_sec_t * 12083 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12084 { 12085 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12086 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12087 12088 if (i >= dof->dofh_secnum) { 12089 dtrace_dof_error(dof, "referenced section index is invalid"); 12090 return (NULL); 12091 } 12092 12093 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12094 dtrace_dof_error(dof, "referenced section is not loadable"); 12095 return (NULL); 12096 } 12097 12098 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12099 dtrace_dof_error(dof, "referenced section is the wrong type"); 12100 return (NULL); 12101 } 12102 12103 return (sec); 12104 } 12105 12106 static dtrace_probedesc_t * 12107 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12108 { 12109 dof_probedesc_t *probe; 12110 dof_sec_t *strtab; 12111 uintptr_t daddr = (uintptr_t)dof; 12112 uintptr_t str; 12113 size_t size; 12114 12115 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12116 dtrace_dof_error(dof, "invalid probe section"); 12117 return (NULL); 12118 } 12119 12120 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12121 dtrace_dof_error(dof, "bad alignment in probe description"); 12122 return (NULL); 12123 } 12124 12125 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12126 dtrace_dof_error(dof, "truncated probe description"); 12127 return (NULL); 12128 } 12129 12130 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12131 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12132 12133 if (strtab == NULL) 12134 return (NULL); 12135 12136 str = daddr + strtab->dofs_offset; 12137 size = strtab->dofs_size; 12138 12139 if (probe->dofp_provider >= strtab->dofs_size) { 12140 dtrace_dof_error(dof, "corrupt probe provider"); 12141 return (NULL); 12142 } 12143 12144 (void) strncpy(desc->dtpd_provider, 12145 (char *)(str + probe->dofp_provider), 12146 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12147 12148 if (probe->dofp_mod >= strtab->dofs_size) { 12149 dtrace_dof_error(dof, "corrupt probe module"); 12150 return (NULL); 12151 } 12152 12153 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12154 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12155 12156 if (probe->dofp_func >= strtab->dofs_size) { 12157 dtrace_dof_error(dof, "corrupt probe function"); 12158 return (NULL); 12159 } 12160 12161 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12162 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12163 12164 if (probe->dofp_name >= strtab->dofs_size) { 12165 dtrace_dof_error(dof, "corrupt probe name"); 12166 return (NULL); 12167 } 12168 12169 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12170 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12171 12172 return (desc); 12173 } 12174 12175 static dtrace_difo_t * 12176 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12177 cred_t *cr) 12178 { 12179 dtrace_difo_t *dp; 12180 size_t ttl = 0; 12181 dof_difohdr_t *dofd; 12182 uintptr_t daddr = (uintptr_t)dof; 12183 size_t max = dtrace_difo_maxsize; 12184 int i, l, n; 12185 12186 static const struct { 12187 int section; 12188 int bufoffs; 12189 int lenoffs; 12190 int entsize; 12191 int align; 12192 const char *msg; 12193 } difo[] = { 12194 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12195 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12196 sizeof (dif_instr_t), "multiple DIF sections" }, 
12197 12198 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12199 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12200 sizeof (uint64_t), "multiple integer tables" }, 12201 12202 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12203 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12204 sizeof (char), "multiple string tables" }, 12205 12206 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12207 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12208 sizeof (uint_t), "multiple variable tables" }, 12209 12210 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12211 }; 12212 12213 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12214 dtrace_dof_error(dof, "invalid DIFO header section"); 12215 return (NULL); 12216 } 12217 12218 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12219 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12220 return (NULL); 12221 } 12222 12223 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12224 sec->dofs_size % sizeof (dof_secidx_t)) { 12225 dtrace_dof_error(dof, "bad size in DIFO header"); 12226 return (NULL); 12227 } 12228 12229 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12230 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12231 12232 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12233 dp->dtdo_rtype = dofd->dofd_rtype; 12234 12235 for (l = 0; l < n; l++) { 12236 dof_sec_t *subsec; 12237 void **bufp; 12238 uint32_t *lenp; 12239 12240 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12241 dofd->dofd_links[l])) == NULL) 12242 goto err; /* invalid section link */ 12243 12244 if (ttl + subsec->dofs_size > max) { 12245 dtrace_dof_error(dof, "exceeds maximum size"); 12246 goto err; 12247 } 12248 12249 ttl += subsec->dofs_size; 12250 12251 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12252 if (subsec->dofs_type != difo[i].section) 12253 continue; 12254 12255 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12256 dtrace_dof_error(dof, "section not loaded"); 12257 goto err; 12258 } 12259 12260 if (subsec->dofs_align != difo[i].align) { 12261 dtrace_dof_error(dof, "bad alignment"); 12262 goto err; 12263 } 12264 12265 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12266 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12267 12268 if (*bufp != NULL) { 12269 dtrace_dof_error(dof, difo[i].msg); 12270 goto err; 12271 } 12272 12273 if (difo[i].entsize != subsec->dofs_entsize) { 12274 dtrace_dof_error(dof, "entry size mismatch"); 12275 goto err; 12276 } 12277 12278 if (subsec->dofs_entsize != 0 && 12279 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12280 dtrace_dof_error(dof, "corrupt entry size"); 12281 goto err; 12282 } 12283 12284 *lenp = subsec->dofs_size; 12285 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12286 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12287 *bufp, subsec->dofs_size); 12288 12289 if (subsec->dofs_entsize != 0) 12290 *lenp /= subsec->dofs_entsize; 12291 12292 break; 12293 } 12294 12295 /* 12296 * If we encounter a loadable DIFO sub-section that is not 12297 * known to us, assume this is a broken program and fail. 12298 */ 12299 if (difo[i].section == DOF_SECT_NONE && 12300 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12301 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12302 goto err; 12303 } 12304 } 12305 12306 if (dp->dtdo_buf == NULL) { 12307 /* 12308 * We can't have a DIF object without DIF text. 
12309 */ 12310 dtrace_dof_error(dof, "missing DIF text"); 12311 goto err; 12312 } 12313 12314 /* 12315 * Before we validate the DIF object, run through the variable table 12316 * looking for the strings -- if any of their size are under, we'll set 12317 * their size to be the system-wide default string size. Note that 12318 * this should _not_ happen if the "strsize" option has been set -- 12319 * in this case, the compiler should have set the size to reflect the 12320 * setting of the option. 12321 */ 12322 for (i = 0; i < dp->dtdo_varlen; i++) { 12323 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12324 dtrace_diftype_t *t = &v->dtdv_type; 12325 12326 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12327 continue; 12328 12329 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12330 t->dtdt_size = dtrace_strsize_default; 12331 } 12332 12333 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12334 goto err; 12335 12336 dtrace_difo_init(dp, vstate); 12337 return (dp); 12338 12339 err: 12340 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12341 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12342 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12343 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12344 12345 kmem_free(dp, sizeof (dtrace_difo_t)); 12346 return (NULL); 12347 } 12348 12349 static dtrace_predicate_t * 12350 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12351 cred_t *cr) 12352 { 12353 dtrace_difo_t *dp; 12354 12355 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12356 return (NULL); 12357 12358 return (dtrace_predicate_create(dp)); 12359 } 12360 12361 static dtrace_actdesc_t * 12362 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12363 cred_t *cr) 12364 { 12365 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12366 dof_actdesc_t *desc; 12367 dof_sec_t *difosec; 12368 size_t offs; 12369 uintptr_t daddr = (uintptr_t)dof; 12370 uint64_t arg; 12371 dtrace_actkind_t kind; 12372 12373 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12374 dtrace_dof_error(dof, "invalid action section"); 12375 return (NULL); 12376 } 12377 12378 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12379 dtrace_dof_error(dof, "truncated action description"); 12380 return (NULL); 12381 } 12382 12383 if (sec->dofs_align != sizeof (uint64_t)) { 12384 dtrace_dof_error(dof, "bad alignment in action description"); 12385 return (NULL); 12386 } 12387 12388 if (sec->dofs_size < sec->dofs_entsize) { 12389 dtrace_dof_error(dof, "section entry size exceeds total size"); 12390 return (NULL); 12391 } 12392 12393 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12394 dtrace_dof_error(dof, "bad entry size in action description"); 12395 return (NULL); 12396 } 12397 12398 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12399 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12400 return (NULL); 12401 } 12402 12403 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12404 desc = (dof_actdesc_t *)(daddr + 12405 (uintptr_t)sec->dofs_offset + offs); 12406 kind = (dtrace_actkind_t)desc->dofa_kind; 12407 12408 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12409 (kind != DTRACEACT_PRINTA || 12410 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12411 (kind == DTRACEACT_DIFEXPR && 12412 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12413 dof_sec_t *strtab; 12414 char *str, *fmt; 12415 uint64_t i; 12416 12417 /* 12418 * The argument to these actions is an index into the 
12419 * DOF string table. For printf()-like actions, this 12420 * is the format string. For print(), this is the 12421 * CTF type of the expression result. 12422 */ 12423 if ((strtab = dtrace_dof_sect(dof, 12424 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12425 goto err; 12426 12427 str = (char *)((uintptr_t)dof + 12428 (uintptr_t)strtab->dofs_offset); 12429 12430 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12431 if (str[i] == '\0') 12432 break; 12433 } 12434 12435 if (i >= strtab->dofs_size) { 12436 dtrace_dof_error(dof, "bogus format string"); 12437 goto err; 12438 } 12439 12440 if (i == desc->dofa_arg) { 12441 dtrace_dof_error(dof, "empty format string"); 12442 goto err; 12443 } 12444 12445 i -= desc->dofa_arg; 12446 fmt = kmem_alloc(i + 1, KM_SLEEP); 12447 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12448 arg = (uint64_t)(uintptr_t)fmt; 12449 } else { 12450 if (kind == DTRACEACT_PRINTA) { 12451 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12452 arg = 0; 12453 } else { 12454 arg = desc->dofa_arg; 12455 } 12456 } 12457 12458 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12459 desc->dofa_uarg, arg); 12460 12461 if (last != NULL) { 12462 last->dtad_next = act; 12463 } else { 12464 first = act; 12465 } 12466 12467 last = act; 12468 12469 if (desc->dofa_difo == DOF_SECIDX_NONE) 12470 continue; 12471 12472 if ((difosec = dtrace_dof_sect(dof, 12473 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12474 goto err; 12475 12476 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12477 12478 if (act->dtad_difo == NULL) 12479 goto err; 12480 } 12481 12482 ASSERT(first != NULL); 12483 return (first); 12484 12485 err: 12486 for (act = first; act != NULL; act = next) { 12487 next = act->dtad_next; 12488 dtrace_actdesc_release(act, vstate); 12489 } 12490 12491 return (NULL); 12492 } 12493 12494 static dtrace_ecbdesc_t * 12495 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12496 cred_t *cr) 12497 { 12498 dtrace_ecbdesc_t *ep; 12499 dof_ecbdesc_t *ecb; 12500 dtrace_probedesc_t *desc; 12501 dtrace_predicate_t *pred = NULL; 12502 12503 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12504 dtrace_dof_error(dof, "truncated ECB description"); 12505 return (NULL); 12506 } 12507 12508 if (sec->dofs_align != sizeof (uint64_t)) { 12509 dtrace_dof_error(dof, "bad alignment in ECB description"); 12510 return (NULL); 12511 } 12512 12513 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12514 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12515 12516 if (sec == NULL) 12517 return (NULL); 12518 12519 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12520 ep->dted_uarg = ecb->dofe_uarg; 12521 desc = &ep->dted_probe; 12522 12523 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12524 goto err; 12525 12526 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12527 if ((sec = dtrace_dof_sect(dof, 12528 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12529 goto err; 12530 12531 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12532 goto err; 12533 12534 ep->dted_pred.dtpdd_predicate = pred; 12535 } 12536 12537 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12538 if ((sec = dtrace_dof_sect(dof, 12539 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12540 goto err; 12541 12542 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12543 12544 if (ep->dted_action == NULL) 12545 goto err; 12546 } 12547 12548 return (ep); 12549 12550 err: 12551 if (pred != NULL) 12552 dtrace_predicate_release(pred, vstate); 12553 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12554 return (NULL); 12555 } 12556 12557 /* 12558 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12559 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12560 * site of any user SETX relocations to account for load object base address. 12561 * In the future, if we need other relocations, this function can be extended. 12562 */ 12563 static int 12564 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12565 { 12566 uintptr_t daddr = (uintptr_t)dof; 12567 dof_relohdr_t *dofr = 12568 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12569 dof_sec_t *ss, *rs, *ts; 12570 dof_relodesc_t *r; 12571 uint_t i, n; 12572 12573 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12574 sec->dofs_align != sizeof (dof_secidx_t)) { 12575 dtrace_dof_error(dof, "invalid relocation header"); 12576 return (-1); 12577 } 12578 12579 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12580 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12581 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12582 12583 if (ss == NULL || rs == NULL || ts == NULL) 12584 return (-1); /* dtrace_dof_error() has been called already */ 12585 12586 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12587 rs->dofs_align != sizeof (uint64_t)) { 12588 dtrace_dof_error(dof, "invalid relocation section"); 12589 return (-1); 12590 } 12591 12592 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12593 n = rs->dofs_size / rs->dofs_entsize; 12594 12595 for (i = 0; i < n; i++) { 12596 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12597 12598 switch (r->dofr_type) { 12599 case DOF_RELO_NONE: 12600 break; 12601 case DOF_RELO_SETX: 12602 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12603 sizeof (uint64_t) > ts->dofs_size) { 12604 dtrace_dof_error(dof, "bad relocation offset"); 12605 return (-1); 12606 } 12607 12608 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12609 dtrace_dof_error(dof, "misaligned setx relo"); 12610 return (-1); 12611 } 12612 12613 *(uint64_t *)taddr += ubase; 12614 break; 12615 default: 12616 dtrace_dof_error(dof, "invalid relocation type"); 12617 return (-1); 12618 } 12619 12620 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12621 } 12622 12623 return (0); 12624 } 12625 12626 /* 12627 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12628 * header: it should be at the front of a memory region that is at least 12629 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12630 * size. It need not be validated in any other way. 12631 */ 12632 static int 12633 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12634 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12635 { 12636 uint64_t len = dof->dofh_loadsz, seclen; 12637 uintptr_t daddr = (uintptr_t)dof; 12638 dtrace_ecbdesc_t *ep; 12639 dtrace_enabling_t *enab; 12640 uint_t i; 12641 12642 ASSERT(MUTEX_HELD(&dtrace_lock)); 12643 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12644 12645 /* 12646 * Check the DOF header identification bytes. In addition to checking 12647 * valid settings, we also verify that unused bits/bytes are zeroed so 12648 * we can use them later without fear of regressing existing binaries. 
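 *
 * As a rough map of what the checks below enforce (an illustrative
 * summary only; the authoritative layout and values are the DOF_ID_*
 * and DOF_MAG_* definitions in the DOF headers):
 *
 *	dofh_ident[DOF_ID_MAG0 .. DOF_ID_MAG3]	the DOF magic string
 *	dofh_ident[DOF_ID_MODEL]	ILP32 or LP64 data model
 *	dofh_ident[DOF_ID_ENCODING]	must match the native encoding
 *	dofh_ident[DOF_ID_VERSION]	DOF_VERSION_1 or DOF_VERSION_2
 *	dofh_ident[DOF_ID_DIFVERS]	DIF_VERSION_2
 *	dofh_ident[DOF_ID_DIFIREG]	integer registers, at most DIF_DIR_NREGS
 *	dofh_ident[DOF_ID_DIFTREG]	tuple registers, at most DIF_DTR_NREGS
 *	dofh_ident[DOF_ID_PAD ..]	reserved bytes, which must be zero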
12649 */ 12650 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12651 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12652 dtrace_dof_error(dof, "DOF magic string mismatch"); 12653 return (-1); 12654 } 12655 12656 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12657 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12658 dtrace_dof_error(dof, "DOF has invalid data model"); 12659 return (-1); 12660 } 12661 12662 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12663 dtrace_dof_error(dof, "DOF encoding mismatch"); 12664 return (-1); 12665 } 12666 12667 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12668 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12669 dtrace_dof_error(dof, "DOF version mismatch"); 12670 return (-1); 12671 } 12672 12673 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12674 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12675 return (-1); 12676 } 12677 12678 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12679 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12680 return (-1); 12681 } 12682 12683 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12684 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12685 return (-1); 12686 } 12687 12688 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12689 if (dof->dofh_ident[i] != 0) { 12690 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12691 return (-1); 12692 } 12693 } 12694 12695 if (dof->dofh_flags & ~DOF_FL_VALID) { 12696 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12697 return (-1); 12698 } 12699 12700 if (dof->dofh_secsize == 0) { 12701 dtrace_dof_error(dof, "zero section header size"); 12702 return (-1); 12703 } 12704 12705 /* 12706 * Check that the section headers don't exceed the amount of DOF 12707 * data. Note that we cast the section size and number of sections 12708 * to uint64_t's to prevent possible overflow in the multiplication. 12709 */ 12710 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12711 12712 if (dof->dofh_secoff > len || seclen > len || 12713 dof->dofh_secoff + seclen > len) { 12714 dtrace_dof_error(dof, "truncated section headers"); 12715 return (-1); 12716 } 12717 12718 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12719 dtrace_dof_error(dof, "misaligned section headers"); 12720 return (-1); 12721 } 12722 12723 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12724 dtrace_dof_error(dof, "misaligned section size"); 12725 return (-1); 12726 } 12727 12728 /* 12729 * Take an initial pass through the section headers to be sure that 12730 * the headers don't have stray offsets. If the 'noprobes' flag is 12731 * set, do not permit sections relating to providers, probes, or args. 
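 *
 * The per-section checks below rely on the usual power-of-two idiom:
 * an alignment is accepted only if (dofs_align & (dofs_align - 1)) is
 * zero -- for example, 8 passes because 8 & 7 == 0, while a bogus
 * alignment of 6 fails because 6 & 5 == 4 -- and a section offset is
 * then acceptable only if (dofs_offset & (dofs_align - 1)) is likewise
 * zero.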
12732 */ 12733 for (i = 0; i < dof->dofh_secnum; i++) { 12734 dof_sec_t *sec = (dof_sec_t *)(daddr + 12735 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12736 12737 if (noprobes) { 12738 switch (sec->dofs_type) { 12739 case DOF_SECT_PROVIDER: 12740 case DOF_SECT_PROBES: 12741 case DOF_SECT_PRARGS: 12742 case DOF_SECT_PROFFS: 12743 dtrace_dof_error(dof, "illegal sections " 12744 "for enabling"); 12745 return (-1); 12746 } 12747 } 12748 12749 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12750 continue; /* just ignore non-loadable sections */ 12751 12752 if (sec->dofs_align & (sec->dofs_align - 1)) { 12753 dtrace_dof_error(dof, "bad section alignment"); 12754 return (-1); 12755 } 12756 12757 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12758 dtrace_dof_error(dof, "misaligned section"); 12759 return (-1); 12760 } 12761 12762 if (sec->dofs_offset > len || sec->dofs_size > len || 12763 sec->dofs_offset + sec->dofs_size > len) { 12764 dtrace_dof_error(dof, "corrupt section header"); 12765 return (-1); 12766 } 12767 12768 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12769 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12770 dtrace_dof_error(dof, "non-terminating string table"); 12771 return (-1); 12772 } 12773 } 12774 12775 /* 12776 * Take a second pass through the sections and locate and perform any 12777 * relocations that are present. We do this after the first pass to 12778 * be sure that all sections have had their headers validated. 12779 */ 12780 for (i = 0; i < dof->dofh_secnum; i++) { 12781 dof_sec_t *sec = (dof_sec_t *)(daddr + 12782 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12783 12784 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12785 continue; /* skip sections that are not loadable */ 12786 12787 switch (sec->dofs_type) { 12788 case DOF_SECT_URELHDR: 12789 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12790 return (-1); 12791 break; 12792 } 12793 } 12794 12795 if ((enab = *enabp) == NULL) 12796 enab = *enabp = dtrace_enabling_create(vstate); 12797 12798 for (i = 0; i < dof->dofh_secnum; i++) { 12799 dof_sec_t *sec = (dof_sec_t *)(daddr + 12800 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12801 12802 if (sec->dofs_type != DOF_SECT_ECBDESC) 12803 continue; 12804 12805 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12806 dtrace_enabling_destroy(enab); 12807 *enabp = NULL; 12808 return (-1); 12809 } 12810 12811 dtrace_enabling_add(enab, ep); 12812 } 12813 12814 return (0); 12815 } 12816 12817 /* 12818 * Process DOF for any options. This routine assumes that the DOF has been 12819 * at least processed by dtrace_dof_slurp(). 
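 *
 * In outline (a sketch of the loop below, not additional logic), each
 * DOF_SECT_OPTDESC section is treated as an array of dof_optdesc_t
 * entries and processed as:
 *
 *	for (each entry 'desc' in the section) {
 *		if (desc->dofo_strtab != DOF_SECIDX_NONE)  return EINVAL
 *		if (desc->dofo_value == DTRACEOPT_UNSET)   return EINVAL
 *		dtrace_state_option(state, desc->dofo_option,
 *		    desc->dofo_value);
 *	}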
12820 */ 12821 static int 12822 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12823 { 12824 int i, rval; 12825 uint32_t entsize; 12826 size_t offs; 12827 dof_optdesc_t *desc; 12828 12829 for (i = 0; i < dof->dofh_secnum; i++) { 12830 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12831 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12832 12833 if (sec->dofs_type != DOF_SECT_OPTDESC) 12834 continue; 12835 12836 if (sec->dofs_align != sizeof (uint64_t)) { 12837 dtrace_dof_error(dof, "bad alignment in " 12838 "option description"); 12839 return (EINVAL); 12840 } 12841 12842 if ((entsize = sec->dofs_entsize) == 0) { 12843 dtrace_dof_error(dof, "zeroed option entry size"); 12844 return (EINVAL); 12845 } 12846 12847 if (entsize < sizeof (dof_optdesc_t)) { 12848 dtrace_dof_error(dof, "bad option entry size"); 12849 return (EINVAL); 12850 } 12851 12852 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12853 desc = (dof_optdesc_t *)((uintptr_t)dof + 12854 (uintptr_t)sec->dofs_offset + offs); 12855 12856 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12857 dtrace_dof_error(dof, "non-zero option string"); 12858 return (EINVAL); 12859 } 12860 12861 if (desc->dofo_value == DTRACEOPT_UNSET) { 12862 dtrace_dof_error(dof, "unset option"); 12863 return (EINVAL); 12864 } 12865 12866 if ((rval = dtrace_state_option(state, 12867 desc->dofo_option, desc->dofo_value)) != 0) { 12868 dtrace_dof_error(dof, "rejected option"); 12869 return (rval); 12870 } 12871 } 12872 } 12873 12874 return (0); 12875 } 12876 12877 /* 12878 * DTrace Consumer State Functions 12879 */ 12880 static int 12881 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12882 { 12883 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12884 void *base; 12885 uintptr_t limit; 12886 dtrace_dynvar_t *dvar, *next, *start; 12887 int i; 12888 12889 ASSERT(MUTEX_HELD(&dtrace_lock)); 12890 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12891 12892 bzero(dstate, sizeof (dtrace_dstate_t)); 12893 12894 if ((dstate->dtds_chunksize = chunksize) == 0) 12895 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12896 12897 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12898 size = min; 12899 12900 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12901 return (ENOMEM); 12902 12903 dstate->dtds_size = size; 12904 dstate->dtds_base = base; 12905 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12906 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12907 12908 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12909 12910 if (hashsize != 1 && (hashsize & 1)) 12911 hashsize--; 12912 12913 dstate->dtds_hashsize = hashsize; 12914 dstate->dtds_hash = dstate->dtds_base; 12915 12916 /* 12917 * Set all of our hash buckets to point to the single sink, and (if 12918 * it hasn't already been set), set the sink's hash value to be the 12919 * sink sentinel value. The sink is needed for dynamic variable 12920 * lookups to know that they have iterated over an entire, valid hash 12921 * chain. 12922 */ 12923 for (i = 0; i < hashsize; i++) 12924 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12925 12926 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12927 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12928 12929 /* 12930 * Determine number of active CPUs. Divide free list evenly among 12931 * active CPUs. 
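 *
 * As a worked example with purely hypothetical numbers: given 1MB of
 * space remaining after the hash table, a 256-byte chunk size and 4
 * active CPUs, each CPU's free list receives roughly 256KB rounded
 * down to a multiple of the chunk size, and the final CPU simply
 * extends its list to the end of the arena so that no space is
 * stranded.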
12932 */ 12933 start = (dtrace_dynvar_t *) 12934 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12935 limit = (uintptr_t)base + size; 12936 12937 maxper = (limit - (uintptr_t)start) / NCPU; 12938 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12939 12940 #if !defined(sun) 12941 CPU_FOREACH(i) { 12942 #else 12943 for (i = 0; i < NCPU; i++) { 12944 #endif 12945 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12946 12947 /* 12948 * If we don't even have enough chunks to make it once through 12949 * NCPUs, we're just going to allocate everything to the first 12950 * CPU. And if we're on the last CPU, we're going to allocate 12951 * whatever is left over. In either case, we set the limit to 12952 * be the limit of the dynamic variable space. 12953 */ 12954 if (maxper == 0 || i == NCPU - 1) { 12955 limit = (uintptr_t)base + size; 12956 start = NULL; 12957 } else { 12958 limit = (uintptr_t)start + maxper; 12959 start = (dtrace_dynvar_t *)limit; 12960 } 12961 12962 ASSERT(limit <= (uintptr_t)base + size); 12963 12964 for (;;) { 12965 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12966 dstate->dtds_chunksize); 12967 12968 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12969 break; 12970 12971 dvar->dtdv_next = next; 12972 dvar = next; 12973 } 12974 12975 if (maxper == 0) 12976 break; 12977 } 12978 12979 return (0); 12980 } 12981 12982 static void 12983 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12984 { 12985 ASSERT(MUTEX_HELD(&cpu_lock)); 12986 12987 if (dstate->dtds_base == NULL) 12988 return; 12989 12990 kmem_free(dstate->dtds_base, dstate->dtds_size); 12991 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12992 } 12993 12994 static void 12995 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12996 { 12997 /* 12998 * Logical XOR, where are you? 12999 */ 13000 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 13001 13002 if (vstate->dtvs_nglobals > 0) { 13003 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 13004 sizeof (dtrace_statvar_t *)); 13005 } 13006 13007 if (vstate->dtvs_ntlocals > 0) { 13008 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 13009 sizeof (dtrace_difv_t)); 13010 } 13011 13012 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13013 13014 if (vstate->dtvs_nlocals > 0) { 13015 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13016 sizeof (dtrace_statvar_t *)); 13017 } 13018 } 13019 13020 #if defined(sun) 13021 static void 13022 dtrace_state_clean(dtrace_state_t *state) 13023 { 13024 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13025 return; 13026 13027 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13028 dtrace_speculation_clean(state); 13029 } 13030 13031 static void 13032 dtrace_state_deadman(dtrace_state_t *state) 13033 { 13034 hrtime_t now; 13035 13036 dtrace_sync(); 13037 13038 now = dtrace_gethrtime(); 13039 13040 if (state != dtrace_anon.dta_state && 13041 now - state->dts_laststatus >= dtrace_deadman_user) 13042 return; 13043 13044 /* 13045 * We must be sure that dts_alive never appears to be less than the 13046 * value upon entry to dtrace_state_deadman(), and because we lack a 13047 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13048 * store INT64_MAX to it, followed by a memory barrier, followed by 13049 * the new value. This assures that dts_alive never appears to be 13050 * less than its true value, regardless of the order in which the 13051 * stores to the underlying storage are issued. 
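 *
 * Schematically, the update below is:
 *
 *	dts_alive = INT64_MAX;		(a value that can only look newer)
 *	dtrace_membar_producer();	(order the two stores)
 *	dts_alive = now;		(publish the current timestamp)
 *
 * so a reader racing with this code observes the old value, the
 * INT64_MAX sentinel, or the new value -- never anything smaller than
 * the value that was current on entry.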
13052 */ 13053 state->dts_alive = INT64_MAX; 13054 dtrace_membar_producer(); 13055 state->dts_alive = now; 13056 } 13057 #else 13058 static void 13059 dtrace_state_clean(void *arg) 13060 { 13061 dtrace_state_t *state = arg; 13062 dtrace_optval_t *opt = state->dts_options; 13063 13064 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13065 return; 13066 13067 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13068 dtrace_speculation_clean(state); 13069 13070 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13071 dtrace_state_clean, state); 13072 } 13073 13074 static void 13075 dtrace_state_deadman(void *arg) 13076 { 13077 dtrace_state_t *state = arg; 13078 hrtime_t now; 13079 13080 dtrace_sync(); 13081 13082 dtrace_debug_output(); 13083 13084 now = dtrace_gethrtime(); 13085 13086 if (state != dtrace_anon.dta_state && 13087 now - state->dts_laststatus >= dtrace_deadman_user) 13088 return; 13089 13090 /* 13091 * We must be sure that dts_alive never appears to be less than the 13092 * value upon entry to dtrace_state_deadman(), and because we lack a 13093 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13094 * store INT64_MAX to it, followed by a memory barrier, followed by 13095 * the new value. This assures that dts_alive never appears to be 13096 * less than its true value, regardless of the order in which the 13097 * stores to the underlying storage are issued. 13098 */ 13099 state->dts_alive = INT64_MAX; 13100 dtrace_membar_producer(); 13101 state->dts_alive = now; 13102 13103 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13104 dtrace_state_deadman, state); 13105 } 13106 #endif 13107 13108 static dtrace_state_t * 13109 #if defined(sun) 13110 dtrace_state_create(dev_t *devp, cred_t *cr) 13111 #else 13112 dtrace_state_create(struct cdev *dev) 13113 #endif 13114 { 13115 #if defined(sun) 13116 minor_t minor; 13117 major_t major; 13118 #else 13119 cred_t *cr = NULL; 13120 int m = 0; 13121 #endif 13122 char c[30]; 13123 dtrace_state_t *state; 13124 dtrace_optval_t *opt; 13125 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13126 13127 ASSERT(MUTEX_HELD(&dtrace_lock)); 13128 ASSERT(MUTEX_HELD(&cpu_lock)); 13129 13130 #if defined(sun) 13131 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13132 VM_BESTFIT | VM_SLEEP); 13133 13134 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13135 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13136 return (NULL); 13137 } 13138 13139 state = ddi_get_soft_state(dtrace_softstate, minor); 13140 #else 13141 if (dev != NULL) { 13142 cr = dev->si_cred; 13143 m = dev2unit(dev); 13144 } 13145 13146 /* Allocate memory for the state. */ 13147 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13148 #endif 13149 13150 state->dts_epid = DTRACE_EPIDNONE + 1; 13151 13152 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13153 #if defined(sun) 13154 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13155 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13156 13157 if (devp != NULL) { 13158 major = getemajor(*devp); 13159 } else { 13160 major = ddi_driver_major(dtrace_devi); 13161 } 13162 13163 state->dts_dev = makedevice(major, minor); 13164 13165 if (devp != NULL) 13166 *devp = state->dts_dev; 13167 #else 13168 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13169 state->dts_dev = dev; 13170 #endif 13171 13172 /* 13173 * We allocate NCPU buffers. 
On the one hand, this can be quite 13174 * a bit of memory per instance (nearly 36K on a Starcat). On the 13175 * other hand, it saves an additional memory reference in the probe 13176 * path. 13177 */ 13178 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13179 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13180 13181 #if defined(sun) 13182 state->dts_cleaner = CYCLIC_NONE; 13183 state->dts_deadman = CYCLIC_NONE; 13184 #else 13185 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13186 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13187 #endif 13188 state->dts_vstate.dtvs_state = state; 13189 13190 for (i = 0; i < DTRACEOPT_MAX; i++) 13191 state->dts_options[i] = DTRACEOPT_UNSET; 13192 13193 /* 13194 * Set the default options. 13195 */ 13196 opt = state->dts_options; 13197 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13198 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13199 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13200 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13201 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13202 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13203 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13204 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13205 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13206 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13207 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13208 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13209 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13210 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13211 13212 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13213 13214 /* 13215 * Depending on the user credentials, we set flag bits which alter probe 13216 * visibility or the amount of destructiveness allowed. In the case of 13217 * actual anonymous tracing, or the possession of all privileges, all of 13218 * the normal checks are bypassed. 13219 */ 13220 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13221 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13222 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13223 } else { 13224 /* 13225 * Set up the credentials for this instantiation. We take a 13226 * hold on the credential to prevent it from disappearing on 13227 * us; this in turn prevents the zone_t referenced by this 13228 * credential from disappearing. This means that we can 13229 * examine the credential and the zone from probe context. 13230 */ 13231 crhold(cr); 13232 state->dts_cred.dcr_cred = cr; 13233 13234 /* 13235 * CRA_PROC means "we have *some* privilege for dtrace" and 13236 * unlocks the use of variables like pid, zonename, etc. 13237 */ 13238 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13239 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13240 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13241 } 13242 13243 /* 13244 * dtrace_user allows use of syscall and profile providers. 13245 * If the user also has proc_owner and/or proc_zone, we 13246 * extend the scope to include additional visibility and 13247 * destructive power. 
13248 */ 13249 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13250 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13251 state->dts_cred.dcr_visible |= 13252 DTRACE_CRV_ALLPROC; 13253 13254 state->dts_cred.dcr_action |= 13255 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13256 } 13257 13258 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13259 state->dts_cred.dcr_visible |= 13260 DTRACE_CRV_ALLZONE; 13261 13262 state->dts_cred.dcr_action |= 13263 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13264 } 13265 13266 /* 13267 * If we have all privs in whatever zone this is, 13268 * we can do destructive things to processes which 13269 * have altered credentials. 13270 */ 13271 #if defined(sun) 13272 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13273 cr->cr_zone->zone_privset)) { 13274 state->dts_cred.dcr_action |= 13275 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13276 } 13277 #endif 13278 } 13279 13280 /* 13281 * Holding the dtrace_kernel privilege also implies that 13282 * the user has the dtrace_user privilege from a visibility 13283 * perspective. But without further privileges, some 13284 * destructive actions are not available. 13285 */ 13286 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13287 /* 13288 * Make all probes in all zones visible. However, 13289 * this doesn't mean that all actions become available 13290 * to all zones. 13291 */ 13292 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13293 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13294 13295 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13296 DTRACE_CRA_PROC; 13297 /* 13298 * Holding proc_owner means that destructive actions 13299 * for *this* zone are allowed. 13300 */ 13301 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13302 state->dts_cred.dcr_action |= 13303 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13304 13305 /* 13306 * Holding proc_zone means that destructive actions 13307 * for this user/group ID in all zones is allowed. 13308 */ 13309 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13310 state->dts_cred.dcr_action |= 13311 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13312 13313 #if defined(sun) 13314 /* 13315 * If we have all privs in whatever zone this is, 13316 * we can do destructive things to processes which 13317 * have altered credentials. 13318 */ 13319 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13320 cr->cr_zone->zone_privset)) { 13321 state->dts_cred.dcr_action |= 13322 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13323 } 13324 #endif 13325 } 13326 13327 /* 13328 * Holding the dtrace_proc privilege gives control over fasttrap 13329 * and pid providers. We need to grant wider destructive 13330 * privileges in the event that the user has proc_owner and/or 13331 * proc_zone. 
13332 */ 13333 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13334 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13335 state->dts_cred.dcr_action |= 13336 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13337 13338 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13339 state->dts_cred.dcr_action |= 13340 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13341 } 13342 } 13343 13344 return (state); 13345 } 13346 13347 static int 13348 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13349 { 13350 dtrace_optval_t *opt = state->dts_options, size; 13351 processorid_t cpu = 0;; 13352 int flags = 0, rval; 13353 13354 ASSERT(MUTEX_HELD(&dtrace_lock)); 13355 ASSERT(MUTEX_HELD(&cpu_lock)); 13356 ASSERT(which < DTRACEOPT_MAX); 13357 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13358 (state == dtrace_anon.dta_state && 13359 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13360 13361 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13362 return (0); 13363 13364 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13365 cpu = opt[DTRACEOPT_CPU]; 13366 13367 if (which == DTRACEOPT_SPECSIZE) 13368 flags |= DTRACEBUF_NOSWITCH; 13369 13370 if (which == DTRACEOPT_BUFSIZE) { 13371 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13372 flags |= DTRACEBUF_RING; 13373 13374 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13375 flags |= DTRACEBUF_FILL; 13376 13377 if (state != dtrace_anon.dta_state || 13378 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13379 flags |= DTRACEBUF_INACTIVE; 13380 } 13381 13382 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13383 /* 13384 * The size must be 8-byte aligned. If the size is not 8-byte 13385 * aligned, drop it down by the difference. 13386 */ 13387 if (size & (sizeof (uint64_t) - 1)) 13388 size -= size & (sizeof (uint64_t) - 1); 13389 13390 if (size < state->dts_reserve) { 13391 /* 13392 * Buffers always must be large enough to accommodate 13393 * their prereserved space. We return E2BIG instead 13394 * of ENOMEM in this case to allow for user-level 13395 * software to differentiate the cases. 13396 */ 13397 return (E2BIG); 13398 } 13399 13400 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13401 13402 if (rval != ENOMEM) { 13403 opt[which] = size; 13404 return (rval); 13405 } 13406 13407 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13408 return (rval); 13409 } 13410 13411 return (ENOMEM); 13412 } 13413 13414 static int 13415 dtrace_state_buffers(dtrace_state_t *state) 13416 { 13417 dtrace_speculation_t *spec = state->dts_speculations; 13418 int rval, i; 13419 13420 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13421 DTRACEOPT_BUFSIZE)) != 0) 13422 return (rval); 13423 13424 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13425 DTRACEOPT_AGGSIZE)) != 0) 13426 return (rval); 13427 13428 for (i = 0; i < state->dts_nspeculations; i++) { 13429 if ((rval = dtrace_state_buffer(state, 13430 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13431 return (rval); 13432 } 13433 13434 return (0); 13435 } 13436 13437 static void 13438 dtrace_state_prereserve(dtrace_state_t *state) 13439 { 13440 dtrace_ecb_t *ecb; 13441 dtrace_probe_t *probe; 13442 13443 state->dts_reserve = 0; 13444 13445 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13446 return; 13447 13448 /* 13449 * If our buffer policy is a "fill" buffer policy, we need to set the 13450 * prereserved space to be the space required by the END probes. 
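 *
 * For example (hypothetical sizes): two END-probe ECBs belonging to
 * this state needing 64 and 32 bytes with 8-byte alignment would leave
 * dts_reserve at (64 + 8) + (32 + 8) = 112 bytes, which a "fill"
 * buffer will decline to hand out to ordinary records so that the END
 * records always fit.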
13451 */ 13452 probe = dtrace_probes[dtrace_probeid_end - 1]; 13453 ASSERT(probe != NULL); 13454 13455 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13456 if (ecb->dte_state != state) 13457 continue; 13458 13459 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13460 } 13461 } 13462 13463 static int 13464 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13465 { 13466 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13467 dtrace_speculation_t *spec; 13468 dtrace_buffer_t *buf; 13469 #if defined(sun) 13470 cyc_handler_t hdlr; 13471 cyc_time_t when; 13472 #endif 13473 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13474 dtrace_icookie_t cookie; 13475 13476 mutex_enter(&cpu_lock); 13477 mutex_enter(&dtrace_lock); 13478 13479 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13480 rval = EBUSY; 13481 goto out; 13482 } 13483 13484 /* 13485 * Before we can perform any checks, we must prime all of the 13486 * retained enablings that correspond to this state. 13487 */ 13488 dtrace_enabling_prime(state); 13489 13490 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13491 rval = EACCES; 13492 goto out; 13493 } 13494 13495 dtrace_state_prereserve(state); 13496 13497 /* 13498 * Now we want to do is try to allocate our speculations. 13499 * We do not automatically resize the number of speculations; if 13500 * this fails, we will fail the operation. 13501 */ 13502 nspec = opt[DTRACEOPT_NSPEC]; 13503 ASSERT(nspec != DTRACEOPT_UNSET); 13504 13505 if (nspec > INT_MAX) { 13506 rval = ENOMEM; 13507 goto out; 13508 } 13509 13510 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13511 13512 if (spec == NULL) { 13513 rval = ENOMEM; 13514 goto out; 13515 } 13516 13517 state->dts_speculations = spec; 13518 state->dts_nspeculations = (int)nspec; 13519 13520 for (i = 0; i < nspec; i++) { 13521 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13522 rval = ENOMEM; 13523 goto err; 13524 } 13525 13526 spec[i].dtsp_buffer = buf; 13527 } 13528 13529 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13530 if (dtrace_anon.dta_state == NULL) { 13531 rval = ENOENT; 13532 goto out; 13533 } 13534 13535 if (state->dts_necbs != 0) { 13536 rval = EALREADY; 13537 goto out; 13538 } 13539 13540 state->dts_anon = dtrace_anon_grab(); 13541 ASSERT(state->dts_anon != NULL); 13542 state = state->dts_anon; 13543 13544 /* 13545 * We want "grabanon" to be set in the grabbed state, so we'll 13546 * copy that option value from the grabbing state into the 13547 * grabbed state. 13548 */ 13549 state->dts_options[DTRACEOPT_GRABANON] = 13550 opt[DTRACEOPT_GRABANON]; 13551 13552 *cpu = dtrace_anon.dta_beganon; 13553 13554 /* 13555 * If the anonymous state is active (as it almost certainly 13556 * is if the anonymous enabling ultimately matched anything), 13557 * we don't allow any further option processing -- but we 13558 * don't return failure. 13559 */ 13560 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13561 goto out; 13562 } 13563 13564 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13565 opt[DTRACEOPT_AGGSIZE] != 0) { 13566 if (state->dts_aggregations == NULL) { 13567 /* 13568 * We're not going to create an aggregation buffer 13569 * because we don't have any ECBs that contain 13570 * aggregations -- set this option to 0. 13571 */ 13572 opt[DTRACEOPT_AGGSIZE] = 0; 13573 } else { 13574 /* 13575 * If we have an aggregation buffer, we must also have 13576 * a buffer to use as scratch. 
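 *
 * Put differently (hypothetical numbers): if the enabled ECBs need up
 * to dts_needed = 512 bytes of principal-buffer space but "bufsize"
 * was left unset or set below that, the check below quietly raises the
 * principal buffer size to 512 bytes so that aggregating records
 * always have scratch space available.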
13577 */ 13578 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13579 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13580 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13581 } 13582 } 13583 } 13584 13585 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13586 opt[DTRACEOPT_SPECSIZE] != 0) { 13587 if (!state->dts_speculates) { 13588 /* 13589 * We're not going to create speculation buffers 13590 * because we don't have any ECBs that actually 13591 * speculate -- set the speculation size to 0. 13592 */ 13593 opt[DTRACEOPT_SPECSIZE] = 0; 13594 } 13595 } 13596 13597 /* 13598 * The bare minimum size for any buffer that we're actually going to 13599 * do anything to is sizeof (uint64_t). 13600 */ 13601 sz = sizeof (uint64_t); 13602 13603 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13604 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13605 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13606 /* 13607 * A buffer size has been explicitly set to 0 (or to a size 13608 * that will be adjusted to 0) and we need the space -- we 13609 * need to return failure. We return ENOSPC to differentiate 13610 * it from failing to allocate a buffer due to failure to meet 13611 * the reserve (for which we return E2BIG). 13612 */ 13613 rval = ENOSPC; 13614 goto out; 13615 } 13616 13617 if ((rval = dtrace_state_buffers(state)) != 0) 13618 goto err; 13619 13620 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13621 sz = dtrace_dstate_defsize; 13622 13623 do { 13624 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13625 13626 if (rval == 0) 13627 break; 13628 13629 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13630 goto err; 13631 } while (sz >>= 1); 13632 13633 opt[DTRACEOPT_DYNVARSIZE] = sz; 13634 13635 if (rval != 0) 13636 goto err; 13637 13638 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13639 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13640 13641 if (opt[DTRACEOPT_CLEANRATE] == 0) 13642 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13643 13644 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13645 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13646 13647 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13648 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13649 13650 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13651 #if defined(sun) 13652 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13653 hdlr.cyh_arg = state; 13654 hdlr.cyh_level = CY_LOW_LEVEL; 13655 13656 when.cyt_when = 0; 13657 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13658 13659 state->dts_cleaner = cyclic_add(&hdlr, &when); 13660 13661 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13662 hdlr.cyh_arg = state; 13663 hdlr.cyh_level = CY_LOW_LEVEL; 13664 13665 when.cyt_when = 0; 13666 when.cyt_interval = dtrace_deadman_interval; 13667 13668 state->dts_deadman = cyclic_add(&hdlr, &when); 13669 #else 13670 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13671 dtrace_state_clean, state); 13672 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13673 dtrace_state_deadman, state); 13674 #endif 13675 13676 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13677 13678 /* 13679 * Now it's time to actually fire the BEGIN probe. We need to disable 13680 * interrupts here both to record the CPU on which we fired the BEGIN 13681 * probe (the data from this CPU will be processed first at user 13682 * level) and to manually activate the buffer for this CPU. 
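 *
 * Schematically, the sequence below is:
 *
 *	cookie = dtrace_interrupt_disable();	(we cannot migrate CPUs)
 *	*cpu = curcpu;
 *	clear DTRACEBUF_INACTIVE on this CPU's principal buffer
 *	dtrace_probe(dtrace_probeid_begin, ...);
 *	dtrace_interrupt_enable(cookie);
 *
 * With interrupts disabled, the CPU recorded in *cpu is guaranteed to
 * be the CPU on which BEGIN actually fired.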
13683 */ 13684 cookie = dtrace_interrupt_disable(); 13685 *cpu = curcpu; 13686 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13687 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13688 13689 dtrace_probe(dtrace_probeid_begin, 13690 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13691 dtrace_interrupt_enable(cookie); 13692 /* 13693 * We may have had an exit action from a BEGIN probe; only change our 13694 * state to ACTIVE if we're still in WARMUP. 13695 */ 13696 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13697 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13698 13699 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13700 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13701 13702 /* 13703 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13704 * want each CPU to transition its principal buffer out of the 13705 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13706 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13707 * atomically transition from processing none of a state's ECBs to 13708 * processing all of them. 13709 */ 13710 dtrace_xcall(DTRACE_CPUALL, 13711 (dtrace_xcall_t)dtrace_buffer_activate, state); 13712 goto out; 13713 13714 err: 13715 dtrace_buffer_free(state->dts_buffer); 13716 dtrace_buffer_free(state->dts_aggbuffer); 13717 13718 if ((nspec = state->dts_nspeculations) == 0) { 13719 ASSERT(state->dts_speculations == NULL); 13720 goto out; 13721 } 13722 13723 spec = state->dts_speculations; 13724 ASSERT(spec != NULL); 13725 13726 for (i = 0; i < state->dts_nspeculations; i++) { 13727 if ((buf = spec[i].dtsp_buffer) == NULL) 13728 break; 13729 13730 dtrace_buffer_free(buf); 13731 kmem_free(buf, bufsize); 13732 } 13733 13734 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13735 state->dts_nspeculations = 0; 13736 state->dts_speculations = NULL; 13737 13738 out: 13739 mutex_exit(&dtrace_lock); 13740 mutex_exit(&cpu_lock); 13741 13742 return (rval); 13743 } 13744 13745 static int 13746 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13747 { 13748 dtrace_icookie_t cookie; 13749 13750 ASSERT(MUTEX_HELD(&dtrace_lock)); 13751 13752 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13753 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13754 return (EINVAL); 13755 13756 /* 13757 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13758 * to be sure that every CPU has seen it. See below for the details 13759 * on why this is done. 13760 */ 13761 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13762 dtrace_sync(); 13763 13764 /* 13765 * By this point, it is impossible for any CPU to be still processing 13766 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13767 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13768 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13769 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13770 * iff we're in the END probe. 13771 */ 13772 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13773 dtrace_sync(); 13774 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13775 13776 /* 13777 * Finally, we can release the reserve and call the END probe. We 13778 * disable interrupts across calling the END probe to allow us to 13779 * return the CPU on which we actually called the END probe. This 13780 * allows user-land to be sure that this CPU's principal buffer is 13781 * processed last. 
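 *
 * Taken together, the shutdown sequence in this function is:
 *
 *	set DRAINING, dtrace_sync()	(no CPU still believes ACTIVE)
 *	set COOLDOWN, dtrace_sync()	(no CPU racing in buffer reserve)
 *	release the reserve, fire END with interrupts disabled
 *	set STOPPED, dtrace_sync()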
13782 */ 13783 state->dts_reserve = 0; 13784 13785 cookie = dtrace_interrupt_disable(); 13786 *cpu = curcpu; 13787 dtrace_probe(dtrace_probeid_end, 13788 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13789 dtrace_interrupt_enable(cookie); 13790 13791 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13792 dtrace_sync(); 13793 13794 return (0); 13795 } 13796 13797 static int 13798 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13799 dtrace_optval_t val) 13800 { 13801 ASSERT(MUTEX_HELD(&dtrace_lock)); 13802 13803 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13804 return (EBUSY); 13805 13806 if (option >= DTRACEOPT_MAX) 13807 return (EINVAL); 13808 13809 if (option != DTRACEOPT_CPU && val < 0) 13810 return (EINVAL); 13811 13812 switch (option) { 13813 case DTRACEOPT_DESTRUCTIVE: 13814 if (dtrace_destructive_disallow) 13815 return (EACCES); 13816 13817 state->dts_cred.dcr_destructive = 1; 13818 break; 13819 13820 case DTRACEOPT_BUFSIZE: 13821 case DTRACEOPT_DYNVARSIZE: 13822 case DTRACEOPT_AGGSIZE: 13823 case DTRACEOPT_SPECSIZE: 13824 case DTRACEOPT_STRSIZE: 13825 if (val < 0) 13826 return (EINVAL); 13827 13828 if (val >= LONG_MAX) { 13829 /* 13830 * If this is an otherwise negative value, set it to 13831 * the highest multiple of 128m less than LONG_MAX. 13832 * Technically, we're adjusting the size without 13833 * regard to the buffer resizing policy, but in fact, 13834 * this has no effect -- if we set the buffer size to 13835 * ~LONG_MAX and the buffer policy is ultimately set to 13836 * be "manual", the buffer allocation is guaranteed to 13837 * fail, if only because the allocation requires two 13838 * buffers. (We set the size to the highest 13839 * multiple of 128m because it ensures that the size 13840 * will remain a multiple of a megabyte when 13841 * repeatedly halved -- all the way down to 15m.) 13842 */ 13843 val = LONG_MAX - (1 << 27) + 1; 13844 } 13845 } 13846 13847 state->dts_options[option] = val; 13848 13849 return (0); 13850 } 13851 13852 static void 13853 dtrace_state_destroy(dtrace_state_t *state) 13854 { 13855 dtrace_ecb_t *ecb; 13856 dtrace_vstate_t *vstate = &state->dts_vstate; 13857 #if defined(sun) 13858 minor_t minor = getminor(state->dts_dev); 13859 #endif 13860 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13861 dtrace_speculation_t *spec = state->dts_speculations; 13862 int nspec = state->dts_nspeculations; 13863 uint32_t match; 13864 13865 ASSERT(MUTEX_HELD(&dtrace_lock)); 13866 ASSERT(MUTEX_HELD(&cpu_lock)); 13867 13868 /* 13869 * First, retract any retained enablings for this state. 13870 */ 13871 dtrace_enabling_retract(state); 13872 ASSERT(state->dts_nretained == 0); 13873 13874 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13875 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13876 /* 13877 * We have managed to come into dtrace_state_destroy() on a 13878 * hot enabling -- almost certainly because of a disorderly 13879 * shutdown of a consumer. (That is, a consumer that is 13880 * exiting without having called dtrace_stop().) In this case, 13881 * we're going to set our activity to be KILLED, and then 13882 * issue a sync to be sure that everyone is out of probe 13883 * context before we start blowing away ECBs. 13884 */ 13885 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13886 dtrace_sync(); 13887 } 13888 13889 /* 13890 * Release the credential hold we took in dtrace_state_create().
13891 */ 13892 if (state->dts_cred.dcr_cred != NULL) 13893 crfree(state->dts_cred.dcr_cred); 13894 13895 /* 13896 * Now we can safely disable and destroy any enabled probes. Because 13897 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13898 * (especially if they're all enabled), we take two passes through the 13899 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13900 * in the second we disable whatever is left over. 13901 */ 13902 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13903 for (i = 0; i < state->dts_necbs; i++) { 13904 if ((ecb = state->dts_ecbs[i]) == NULL) 13905 continue; 13906 13907 if (match && ecb->dte_probe != NULL) { 13908 dtrace_probe_t *probe = ecb->dte_probe; 13909 dtrace_provider_t *prov = probe->dtpr_provider; 13910 13911 if (!(prov->dtpv_priv.dtpp_flags & match)) 13912 continue; 13913 } 13914 13915 dtrace_ecb_disable(ecb); 13916 dtrace_ecb_destroy(ecb); 13917 } 13918 13919 if (!match) 13920 break; 13921 } 13922 13923 /* 13924 * Before we free the buffers, perform one more sync to assure that 13925 * every CPU is out of probe context. 13926 */ 13927 dtrace_sync(); 13928 13929 dtrace_buffer_free(state->dts_buffer); 13930 dtrace_buffer_free(state->dts_aggbuffer); 13931 13932 for (i = 0; i < nspec; i++) 13933 dtrace_buffer_free(spec[i].dtsp_buffer); 13934 13935 #if defined(sun) 13936 if (state->dts_cleaner != CYCLIC_NONE) 13937 cyclic_remove(state->dts_cleaner); 13938 13939 if (state->dts_deadman != CYCLIC_NONE) 13940 cyclic_remove(state->dts_deadman); 13941 #else 13942 callout_stop(&state->dts_cleaner); 13943 callout_drain(&state->dts_cleaner); 13944 callout_stop(&state->dts_deadman); 13945 callout_drain(&state->dts_deadman); 13946 #endif 13947 13948 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13949 dtrace_vstate_fini(vstate); 13950 if (state->dts_ecbs != NULL) 13951 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13952 13953 if (state->dts_aggregations != NULL) { 13954 #ifdef DEBUG 13955 for (i = 0; i < state->dts_naggregations; i++) 13956 ASSERT(state->dts_aggregations[i] == NULL); 13957 #endif 13958 ASSERT(state->dts_naggregations > 0); 13959 kmem_free(state->dts_aggregations, 13960 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13961 } 13962 13963 kmem_free(state->dts_buffer, bufsize); 13964 kmem_free(state->dts_aggbuffer, bufsize); 13965 13966 for (i = 0; i < nspec; i++) 13967 kmem_free(spec[i].dtsp_buffer, bufsize); 13968 13969 if (spec != NULL) 13970 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13971 13972 dtrace_format_destroy(state); 13973 13974 if (state->dts_aggid_arena != NULL) { 13975 #if defined(sun) 13976 vmem_destroy(state->dts_aggid_arena); 13977 #else 13978 delete_unrhdr(state->dts_aggid_arena); 13979 #endif 13980 state->dts_aggid_arena = NULL; 13981 } 13982 #if defined(sun) 13983 ddi_soft_state_free(dtrace_softstate, minor); 13984 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13985 #endif 13986 } 13987 13988 /* 13989 * DTrace Anonymous Enabling Functions 13990 */ 13991 static dtrace_state_t * 13992 dtrace_anon_grab(void) 13993 { 13994 dtrace_state_t *state; 13995 13996 ASSERT(MUTEX_HELD(&dtrace_lock)); 13997 13998 if ((state = dtrace_anon.dta_state) == NULL) { 13999 ASSERT(dtrace_anon.dta_enabling == NULL); 14000 return (NULL); 14001 } 14002 14003 ASSERT(dtrace_anon.dta_enabling != NULL); 14004 ASSERT(dtrace_retained != NULL); 14005 14006 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 14007 dtrace_anon.dta_enabling = NULL; 14008 
dtrace_anon.dta_state = NULL; 14009 14010 return (state); 14011 } 14012 14013 static void 14014 dtrace_anon_property(void) 14015 { 14016 int i, rv; 14017 dtrace_state_t *state; 14018 dof_hdr_t *dof; 14019 char c[32]; /* enough for "dof-data-" + digits */ 14020 14021 ASSERT(MUTEX_HELD(&dtrace_lock)); 14022 ASSERT(MUTEX_HELD(&cpu_lock)); 14023 14024 for (i = 0; ; i++) { 14025 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14026 14027 dtrace_err_verbose = 1; 14028 14029 if ((dof = dtrace_dof_property(c)) == NULL) { 14030 dtrace_err_verbose = 0; 14031 break; 14032 } 14033 14034 #if defined(sun) 14035 /* 14036 * We want to create anonymous state, so we need to transition 14037 * the kernel debugger to indicate that DTrace is active. If 14038 * this fails (e.g. because the debugger has modified text in 14039 * some way), we won't continue with the processing. 14040 */ 14041 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14042 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14043 "enabling ignored."); 14044 dtrace_dof_destroy(dof); 14045 break; 14046 } 14047 #endif 14048 14049 /* 14050 * If we haven't allocated an anonymous state, we'll do so now. 14051 */ 14052 if ((state = dtrace_anon.dta_state) == NULL) { 14053 #if defined(sun) 14054 state = dtrace_state_create(NULL, NULL); 14055 #else 14056 state = dtrace_state_create(NULL); 14057 #endif 14058 dtrace_anon.dta_state = state; 14059 14060 if (state == NULL) { 14061 /* 14062 * This basically shouldn't happen: the only 14063 * failure mode from dtrace_state_create() is a 14064 * failure of ddi_soft_state_zalloc() that 14065 * itself should never happen. Still, the 14066 * interface allows for a failure mode, and 14067 * we want to fail as gracefully as possible: 14068 * we'll emit an error message and cease 14069 * processing anonymous state in this case. 14070 */ 14071 cmn_err(CE_WARN, "failed to create " 14072 "anonymous state"); 14073 dtrace_dof_destroy(dof); 14074 break; 14075 } 14076 } 14077 14078 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14079 &dtrace_anon.dta_enabling, 0, B_TRUE); 14080 14081 if (rv == 0) 14082 rv = dtrace_dof_options(dof, state); 14083 14084 dtrace_err_verbose = 0; 14085 dtrace_dof_destroy(dof); 14086 14087 if (rv != 0) { 14088 /* 14089 * This is malformed DOF; chuck any anonymous state 14090 * that we created. 14091 */ 14092 ASSERT(dtrace_anon.dta_enabling == NULL); 14093 dtrace_state_destroy(state); 14094 dtrace_anon.dta_state = NULL; 14095 break; 14096 } 14097 14098 ASSERT(dtrace_anon.dta_enabling != NULL); 14099 } 14100 14101 if (dtrace_anon.dta_enabling != NULL) { 14102 int rval; 14103 14104 /* 14105 * dtrace_enabling_retain() can only fail because we are 14106 * trying to retain more enablings than are allowed -- but 14107 * we only have one anonymous enabling, and we are guaranteed 14108 * to be allowed at least one retained enabling; we assert 14109 * that dtrace_enabling_retain() returns success. 
14110 */ 14111 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14112 ASSERT(rval == 0); 14113 14114 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14115 } 14116 } 14117 14118 /* 14119 * DTrace Helper Functions 14120 */ 14121 static void 14122 dtrace_helper_trace(dtrace_helper_action_t *helper, 14123 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14124 { 14125 uint32_t size, next, nnext, i; 14126 dtrace_helptrace_t *ent; 14127 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14128 14129 if (!dtrace_helptrace_enabled) 14130 return; 14131 14132 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14133 14134 /* 14135 * What would a tracing framework be without its own tracing 14136 * framework? (Well, a hell of a lot simpler, for starters...) 14137 */ 14138 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14139 sizeof (uint64_t) - sizeof (uint64_t); 14140 14141 /* 14142 * Iterate until we can allocate a slot in the trace buffer. 14143 */ 14144 do { 14145 next = dtrace_helptrace_next; 14146 14147 if (next + size < dtrace_helptrace_bufsize) { 14148 nnext = next + size; 14149 } else { 14150 nnext = size; 14151 } 14152 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14153 14154 /* 14155 * We have our slot; fill it in. 14156 */ 14157 if (nnext == size) 14158 next = 0; 14159 14160 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14161 ent->dtht_helper = helper; 14162 ent->dtht_where = where; 14163 ent->dtht_nlocals = vstate->dtvs_nlocals; 14164 14165 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14166 mstate->dtms_fltoffs : -1; 14167 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14168 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14169 14170 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14171 dtrace_statvar_t *svar; 14172 14173 if ((svar = vstate->dtvs_locals[i]) == NULL) 14174 continue; 14175 14176 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14177 ent->dtht_locals[i] = 14178 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14179 } 14180 } 14181 14182 static uint64_t 14183 dtrace_helper(int which, dtrace_mstate_t *mstate, 14184 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14185 { 14186 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14187 uint64_t sarg0 = mstate->dtms_arg[0]; 14188 uint64_t sarg1 = mstate->dtms_arg[1]; 14189 uint64_t rval = 0; 14190 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14191 dtrace_helper_action_t *helper; 14192 dtrace_vstate_t *vstate; 14193 dtrace_difo_t *pred; 14194 int i, trace = dtrace_helptrace_enabled; 14195 14196 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14197 14198 if (helpers == NULL) 14199 return (0); 14200 14201 if ((helper = helpers->dthps_actions[which]) == NULL) 14202 return (0); 14203 14204 vstate = &helpers->dthps_vstate; 14205 mstate->dtms_arg[0] = arg0; 14206 mstate->dtms_arg[1] = arg1; 14207 14208 /* 14209 * Now iterate over each helper. If its predicate evaluates to 'true', 14210 * we'll call the corresponding actions. Note that the below calls 14211 * to dtrace_dif_emulate() may set faults in machine state. This is 14212 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14213 * the stored DIF offset with its own (which is the desired behavior). 14214 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14215 * from machine state; this is okay, too. 
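 *
 * In outline, each helper on the chain below is handled as follows:
 *
 *	if (it has a predicate and that predicate emulates to zero)
 *		move on to the next helper;
 *	emulate each action DIFO in order, keeping its return value;
 *	on any CPU_DTRACE_FAULT, restore the saved arguments and
 *	return 0.
 *
 * The value returned by dtrace_helper() is thus that of the last
 * action of the last helper whose predicate allowed it to run.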
14216 */ 14217 for (; helper != NULL; helper = helper->dtha_next) { 14218 if ((pred = helper->dtha_predicate) != NULL) { 14219 if (trace) 14220 dtrace_helper_trace(helper, mstate, vstate, 0); 14221 14222 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14223 goto next; 14224 14225 if (*flags & CPU_DTRACE_FAULT) 14226 goto err; 14227 } 14228 14229 for (i = 0; i < helper->dtha_nactions; i++) { 14230 if (trace) 14231 dtrace_helper_trace(helper, 14232 mstate, vstate, i + 1); 14233 14234 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14235 mstate, vstate, state); 14236 14237 if (*flags & CPU_DTRACE_FAULT) 14238 goto err; 14239 } 14240 14241 next: 14242 if (trace) 14243 dtrace_helper_trace(helper, mstate, vstate, 14244 DTRACE_HELPTRACE_NEXT); 14245 } 14246 14247 if (trace) 14248 dtrace_helper_trace(helper, mstate, vstate, 14249 DTRACE_HELPTRACE_DONE); 14250 14251 /* 14252 * Restore the args that we saved upon entry. 14253 */ 14254 mstate->dtms_arg[0] = sarg0; 14255 mstate->dtms_arg[1] = sarg1; 14256 14257 return (rval); 14258 14259 err: 14260 if (trace) 14261 dtrace_helper_trace(helper, mstate, vstate, 14262 DTRACE_HELPTRACE_ERR); 14263 14264 /* 14265 * Restore the args that we saved upon entry. 14266 */ 14267 mstate->dtms_arg[0] = sarg0; 14268 mstate->dtms_arg[1] = sarg1; 14269 14270 return (0); 14271 } 14272 14273 static void 14274 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14275 dtrace_vstate_t *vstate) 14276 { 14277 int i; 14278 14279 if (helper->dtha_predicate != NULL) 14280 dtrace_difo_release(helper->dtha_predicate, vstate); 14281 14282 for (i = 0; i < helper->dtha_nactions; i++) { 14283 ASSERT(helper->dtha_actions[i] != NULL); 14284 dtrace_difo_release(helper->dtha_actions[i], vstate); 14285 } 14286 14287 kmem_free(helper->dtha_actions, 14288 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14289 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14290 } 14291 14292 static int 14293 dtrace_helper_destroygen(int gen) 14294 { 14295 proc_t *p = curproc; 14296 dtrace_helpers_t *help = p->p_dtrace_helpers; 14297 dtrace_vstate_t *vstate; 14298 int i; 14299 14300 ASSERT(MUTEX_HELD(&dtrace_lock)); 14301 14302 if (help == NULL || gen > help->dthps_generation) 14303 return (EINVAL); 14304 14305 vstate = &help->dthps_vstate; 14306 14307 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14308 dtrace_helper_action_t *last = NULL, *h, *next; 14309 14310 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14311 next = h->dtha_next; 14312 14313 if (h->dtha_generation == gen) { 14314 if (last != NULL) { 14315 last->dtha_next = next; 14316 } else { 14317 help->dthps_actions[i] = next; 14318 } 14319 14320 dtrace_helper_action_destroy(h, vstate); 14321 } else { 14322 last = h; 14323 } 14324 } 14325 } 14326 14327 /* 14328 * Iterate until we've cleared out all helper providers with the 14329 * given generation number. 14330 */ 14331 for (;;) { 14332 dtrace_helper_provider_t *prov; 14333 14334 /* 14335 * Look for a helper provider with the right generation. We 14336 * have to start back at the beginning of the list each time 14337 * because we drop dtrace_lock. It's unlikely that we'll make 14338 * more than two passes. 14339 */ 14340 for (i = 0; i < help->dthps_nprovs; i++) { 14341 prov = help->dthps_provs[i]; 14342 14343 if (prov->dthp_generation == gen) 14344 break; 14345 } 14346 14347 /* 14348 * If there were no matches, we're done. 14349 */ 14350 if (i == help->dthps_nprovs) 14351 break; 14352 14353 /* 14354 * Move the last helper provider into this slot.
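 *
 * For example (a sketch), with dthps_nprovs == 4 and a match found at
 * i == 1, the statements below leave:
 *
 *	dthps_provs[1] = old dthps_provs[3]
 *	dthps_provs[3] = NULL
 *	dthps_nprovs   = 3
 *
 * The ordering of the remaining providers is not preserved; nothing
 * here relies on it, since the array is treated as an unordered set.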
14355 */ 14356 help->dthps_nprovs--; 14357 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14358 help->dthps_provs[help->dthps_nprovs] = NULL; 14359 14360 mutex_exit(&dtrace_lock); 14361 14362 /* 14363 * If we have a meta provider, remove this helper provider. 14364 */ 14365 mutex_enter(&dtrace_meta_lock); 14366 if (dtrace_meta_pid != NULL) { 14367 ASSERT(dtrace_deferred_pid == NULL); 14368 dtrace_helper_provider_remove(&prov->dthp_prov, 14369 p->p_pid); 14370 } 14371 mutex_exit(&dtrace_meta_lock); 14372 14373 dtrace_helper_provider_destroy(prov); 14374 14375 mutex_enter(&dtrace_lock); 14376 } 14377 14378 return (0); 14379 } 14380 14381 static int 14382 dtrace_helper_validate(dtrace_helper_action_t *helper) 14383 { 14384 int err = 0, i; 14385 dtrace_difo_t *dp; 14386 14387 if ((dp = helper->dtha_predicate) != NULL) 14388 err += dtrace_difo_validate_helper(dp); 14389 14390 for (i = 0; i < helper->dtha_nactions; i++) 14391 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14392 14393 return (err == 0); 14394 } 14395 14396 static int 14397 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14398 { 14399 dtrace_helpers_t *help; 14400 dtrace_helper_action_t *helper, *last; 14401 dtrace_actdesc_t *act; 14402 dtrace_vstate_t *vstate; 14403 dtrace_predicate_t *pred; 14404 int count = 0, nactions = 0, i; 14405 14406 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14407 return (EINVAL); 14408 14409 help = curproc->p_dtrace_helpers; 14410 last = help->dthps_actions[which]; 14411 vstate = &help->dthps_vstate; 14412 14413 for (count = 0; last != NULL; last = last->dtha_next) { 14414 count++; 14415 if (last->dtha_next == NULL) 14416 break; 14417 } 14418 14419 /* 14420 * If we already have dtrace_helper_actions_max helper actions for this 14421 * helper action type, we'll refuse to add a new one. 
14422 */ 14423 if (count >= dtrace_helper_actions_max) 14424 return (ENOSPC); 14425 14426 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14427 helper->dtha_generation = help->dthps_generation; 14428 14429 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14430 ASSERT(pred->dtp_difo != NULL); 14431 dtrace_difo_hold(pred->dtp_difo); 14432 helper->dtha_predicate = pred->dtp_difo; 14433 } 14434 14435 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14436 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14437 goto err; 14438 14439 if (act->dtad_difo == NULL) 14440 goto err; 14441 14442 nactions++; 14443 } 14444 14445 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14446 (helper->dtha_nactions = nactions), KM_SLEEP); 14447 14448 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14449 dtrace_difo_hold(act->dtad_difo); 14450 helper->dtha_actions[i++] = act->dtad_difo; 14451 } 14452 14453 if (!dtrace_helper_validate(helper)) 14454 goto err; 14455 14456 if (last == NULL) { 14457 help->dthps_actions[which] = helper; 14458 } else { 14459 last->dtha_next = helper; 14460 } 14461 14462 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14463 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14464 dtrace_helptrace_next = 0; 14465 } 14466 14467 return (0); 14468 err: 14469 dtrace_helper_action_destroy(helper, vstate); 14470 return (EINVAL); 14471 } 14472 14473 static void 14474 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14475 dof_helper_t *dofhp) 14476 { 14477 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14478 14479 mutex_enter(&dtrace_meta_lock); 14480 mutex_enter(&dtrace_lock); 14481 14482 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14483 /* 14484 * If the dtrace module is loaded but not attached, or if 14485 * there isn't a meta provider registered to deal with 14486 * these provider descriptions, we need to postpone creating 14487 * the actual providers until later. 14488 */ 14489 14490 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14491 dtrace_deferred_pid != help) { 14492 help->dthps_deferred = 1; 14493 help->dthps_pid = p->p_pid; 14494 help->dthps_next = dtrace_deferred_pid; 14495 help->dthps_prev = NULL; 14496 if (dtrace_deferred_pid != NULL) 14497 dtrace_deferred_pid->dthps_prev = help; 14498 dtrace_deferred_pid = help; 14499 } 14500 14501 mutex_exit(&dtrace_lock); 14502 14503 } else if (dofhp != NULL) { 14504 /* 14505 * If the dtrace module is loaded and we have a particular 14506 * helper provider description, pass that off to the 14507 * meta provider. 14508 */ 14509 14510 mutex_exit(&dtrace_lock); 14511 14512 dtrace_helper_provide(dofhp, p->p_pid); 14513 14514 } else { 14515 /* 14516 * Otherwise, just pass all the helper provider descriptions 14517 * off to the meta provider. 14518 */ 14519 14520 int i; 14521 mutex_exit(&dtrace_lock); 14522 14523 for (i = 0; i < help->dthps_nprovs; i++) { 14524 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14525 p->p_pid); 14526 } 14527 } 14528 14529 mutex_exit(&dtrace_meta_lock); 14530 } 14531 14532 static int 14533 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14534 { 14535 dtrace_helpers_t *help; 14536 dtrace_helper_provider_t *hprov, **tmp_provs; 14537 uint_t tmp_maxprovs, i; 14538 14539 ASSERT(MUTEX_HELD(&dtrace_lock)); 14540 14541 help = curproc->p_dtrace_helpers; 14542 ASSERT(help != NULL); 14543 14544 /* 14545 * If we already have dtrace_helper_providers_max helper providers, 14546 * we'll refuse to add a new one.
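 *
 * (When there is room, the provider table below grows by doubling --
 *  0, 2, 4, 8, ... -- clamped to dtrace_helper_providers_max, so the
 *  copy-and-free in the reallocation path is taken only O(log n)
 *  times over the life of the process.)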
14547 */ 14548 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14549 return (ENOSPC); 14550 14551 /* 14552 * Check to make sure this isn't a duplicate. 14553 */ 14554 for (i = 0; i < help->dthps_nprovs; i++) { 14555 if (dofhp->dofhp_addr == 14556 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14557 return (EALREADY); 14558 } 14559 14560 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14561 hprov->dthp_prov = *dofhp; 14562 hprov->dthp_ref = 1; 14563 hprov->dthp_generation = gen; 14564 14565 /* 14566 * Allocate a bigger table for helper providers if it's already full. 14567 */ 14568 if (help->dthps_maxprovs == help->dthps_nprovs) { 14569 tmp_maxprovs = help->dthps_maxprovs; 14570 tmp_provs = help->dthps_provs; 14571 14572 if (help->dthps_maxprovs == 0) 14573 help->dthps_maxprovs = 2; 14574 else 14575 help->dthps_maxprovs *= 2; 14576 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14577 help->dthps_maxprovs = dtrace_helper_providers_max; 14578 14579 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14580 14581 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14582 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14583 14584 if (tmp_provs != NULL) { 14585 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14586 sizeof (dtrace_helper_provider_t *)); 14587 kmem_free(tmp_provs, tmp_maxprovs * 14588 sizeof (dtrace_helper_provider_t *)); 14589 } 14590 } 14591 14592 help->dthps_provs[help->dthps_nprovs] = hprov; 14593 help->dthps_nprovs++; 14594 14595 return (0); 14596 } 14597 14598 static void 14599 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14600 { 14601 mutex_enter(&dtrace_lock); 14602 14603 if (--hprov->dthp_ref == 0) { 14604 dof_hdr_t *dof; 14605 mutex_exit(&dtrace_lock); 14606 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14607 dtrace_dof_destroy(dof); 14608 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14609 } else { 14610 mutex_exit(&dtrace_lock); 14611 } 14612 } 14613 14614 static int 14615 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14616 { 14617 uintptr_t daddr = (uintptr_t)dof; 14618 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14619 dof_provider_t *provider; 14620 dof_probe_t *probe; 14621 uint8_t *arg; 14622 char *strtab, *typestr; 14623 dof_stridx_t typeidx; 14624 size_t typesz; 14625 uint_t nprobes, j, k; 14626 14627 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14628 14629 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14630 dtrace_dof_error(dof, "misaligned section offset"); 14631 return (-1); 14632 } 14633 14634 /* 14635 * The section needs to be large enough to contain the DOF provider 14636 * structure appropriate for the given version. 14637 */ 14638 if (sec->dofs_size < 14639 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14640 offsetof(dof_provider_t, dofpv_prenoffs) : 14641 sizeof (dof_provider_t))) { 14642 dtrace_dof_error(dof, "provider section too small"); 14643 return (-1); 14644 } 14645 14646 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14647 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14648 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14649 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14650 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14651 14652 if (str_sec == NULL || prb_sec == NULL || 14653 arg_sec == NULL || off_sec == NULL) 14654 return (-1); 14655 14656 enoff_sec = NULL; 14657 14658 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14659 provider->dofpv_prenoffs != DOF_SECT_NONE && 14660 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14661 provider->dofpv_prenoffs)) == NULL) 14662 return (-1); 14663 14664 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14665 14666 if (provider->dofpv_name >= str_sec->dofs_size || 14667 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14668 dtrace_dof_error(dof, "invalid provider name"); 14669 return (-1); 14670 } 14671 14672 if (prb_sec->dofs_entsize == 0 || 14673 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14674 dtrace_dof_error(dof, "invalid entry size"); 14675 return (-1); 14676 } 14677 14678 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14679 dtrace_dof_error(dof, "misaligned entry size"); 14680 return (-1); 14681 } 14682 14683 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14684 dtrace_dof_error(dof, "invalid entry size"); 14685 return (-1); 14686 } 14687 14688 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14689 dtrace_dof_error(dof, "misaligned section offset"); 14690 return (-1); 14691 } 14692 14693 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14694 dtrace_dof_error(dof, "invalid entry size"); 14695 return (-1); 14696 } 14697 14698 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14699 14700 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14701 14702 /* 14703 * Take a pass through the probes to check for errors. 14704 */ 14705 for (j = 0; j < nprobes; j++) { 14706 probe = (dof_probe_t *)(uintptr_t)(daddr + 14707 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14708 14709 if (probe->dofpr_func >= str_sec->dofs_size) { 14710 dtrace_dof_error(dof, "invalid function name"); 14711 return (-1); 14712 } 14713 14714 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14715 dtrace_dof_error(dof, "function name too long"); 14716 return (-1); 14717 } 14718 14719 if (probe->dofpr_name >= str_sec->dofs_size || 14720 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14721 dtrace_dof_error(dof, "invalid probe name"); 14722 return (-1); 14723 } 14724 14725 /* 14726 * The offset count must not wrap the index, and the offsets 14727 * must also not overflow the section's data. 14728 */ 14729 if (probe->dofpr_offidx + probe->dofpr_noffs < 14730 probe->dofpr_offidx || 14731 (probe->dofpr_offidx + probe->dofpr_noffs) * 14732 off_sec->dofs_entsize > off_sec->dofs_size) { 14733 dtrace_dof_error(dof, "invalid probe offset"); 14734 return (-1); 14735 } 14736 14737 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14738 /* 14739 * If there's no is-enabled offset section, make sure 14740 * there aren't any is-enabled offsets. Otherwise 14741 * perform the same checks as for probe offsets 14742 * (immediately above). 
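 *
 * As with the probe-offset check above, the "idx + n < idx" clause
 * guards against unsigned wraparound before the size comparison is
 * trusted; e.g. (a sketch with 32-bit values) idx = 0xfffffff0 and
 * n = 0x20 sum to 0x10, which is less than idx, so the descriptor is
 * rejected rather than letting a wrapped product slip past the
 * comparison against dofs_size.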
14743 */ 14744 if (enoff_sec == NULL) { 14745 if (probe->dofpr_enoffidx != 0 || 14746 probe->dofpr_nenoffs != 0) { 14747 dtrace_dof_error(dof, "is-enabled " 14748 "offsets with null section"); 14749 return (-1); 14750 } 14751 } else if (probe->dofpr_enoffidx + 14752 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14753 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14754 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14755 dtrace_dof_error(dof, "invalid is-enabled " 14756 "offset"); 14757 return (-1); 14758 } 14759 14760 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14761 dtrace_dof_error(dof, "zero probe and " 14762 "is-enabled offsets"); 14763 return (-1); 14764 } 14765 } else if (probe->dofpr_noffs == 0) { 14766 dtrace_dof_error(dof, "zero probe offsets"); 14767 return (-1); 14768 } 14769 14770 if (probe->dofpr_argidx + probe->dofpr_xargc < 14771 probe->dofpr_argidx || 14772 (probe->dofpr_argidx + probe->dofpr_xargc) * 14773 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14774 dtrace_dof_error(dof, "invalid args"); 14775 return (-1); 14776 } 14777 14778 typeidx = probe->dofpr_nargv; 14779 typestr = strtab + probe->dofpr_nargv; 14780 for (k = 0; k < probe->dofpr_nargc; k++) { 14781 if (typeidx >= str_sec->dofs_size) { 14782 dtrace_dof_error(dof, "bad " 14783 "native argument type"); 14784 return (-1); 14785 } 14786 14787 typesz = strlen(typestr) + 1; 14788 if (typesz > DTRACE_ARGTYPELEN) { 14789 dtrace_dof_error(dof, "native " 14790 "argument type too long"); 14791 return (-1); 14792 } 14793 typeidx += typesz; 14794 typestr += typesz; 14795 } 14796 14797 typeidx = probe->dofpr_xargv; 14798 typestr = strtab + probe->dofpr_xargv; 14799 for (k = 0; k < probe->dofpr_xargc; k++) { 14800 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14801 dtrace_dof_error(dof, "bad " 14802 "native argument index"); 14803 return (-1); 14804 } 14805 14806 if (typeidx >= str_sec->dofs_size) { 14807 dtrace_dof_error(dof, "bad " 14808 "translated argument type"); 14809 return (-1); 14810 } 14811 14812 typesz = strlen(typestr) + 1; 14813 if (typesz > DTRACE_ARGTYPELEN) { 14814 dtrace_dof_error(dof, "translated argument " 14815 "type too long"); 14816 return (-1); 14817 } 14818 14819 typeidx += typesz; 14820 typestr += typesz; 14821 } 14822 } 14823 14824 return (0); 14825 } 14826 14827 static int 14828 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14829 { 14830 dtrace_helpers_t *help; 14831 dtrace_vstate_t *vstate; 14832 dtrace_enabling_t *enab = NULL; 14833 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14834 uintptr_t daddr = (uintptr_t)dof; 14835 14836 ASSERT(MUTEX_HELD(&dtrace_lock)); 14837 14838 if ((help = curproc->p_dtrace_helpers) == NULL) 14839 help = dtrace_helpers_create(curproc); 14840 14841 vstate = &help->dthps_vstate; 14842 14843 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14844 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14845 dtrace_dof_destroy(dof); 14846 return (rv); 14847 } 14848 14849 /* 14850 * Look for helper providers and validate their descriptions. 
14851 */ 14852 if (dhp != NULL) { 14853 for (i = 0; i < dof->dofh_secnum; i++) { 14854 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14855 dof->dofh_secoff + i * dof->dofh_secsize); 14856 14857 if (sec->dofs_type != DOF_SECT_PROVIDER) 14858 continue; 14859 14860 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14861 dtrace_enabling_destroy(enab); 14862 dtrace_dof_destroy(dof); 14863 return (-1); 14864 } 14865 14866 nprovs++; 14867 } 14868 } 14869 14870 /* 14871 * Now we need to walk through the ECB descriptions in the enabling. 14872 */ 14873 for (i = 0; i < enab->dten_ndesc; i++) { 14874 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14875 dtrace_probedesc_t *desc = &ep->dted_probe; 14876 14877 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14878 continue; 14879 14880 if (strcmp(desc->dtpd_mod, "helper") != 0) 14881 continue; 14882 14883 if (strcmp(desc->dtpd_func, "ustack") != 0) 14884 continue; 14885 14886 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14887 ep)) != 0) { 14888 /* 14889 * Adding this helper action failed -- we are now going 14890 * to rip out the entire generation and return failure. 14891 */ 14892 (void) dtrace_helper_destroygen(help->dthps_generation); 14893 dtrace_enabling_destroy(enab); 14894 dtrace_dof_destroy(dof); 14895 return (-1); 14896 } 14897 14898 nhelpers++; 14899 } 14900 14901 if (nhelpers < enab->dten_ndesc) 14902 dtrace_dof_error(dof, "unmatched helpers"); 14903 14904 gen = help->dthps_generation++; 14905 dtrace_enabling_destroy(enab); 14906 14907 if (dhp != NULL && nprovs > 0) { 14908 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14909 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14910 mutex_exit(&dtrace_lock); 14911 dtrace_helper_provider_register(curproc, help, dhp); 14912 mutex_enter(&dtrace_lock); 14913 14914 destroy = 0; 14915 } 14916 } 14917 14918 if (destroy) 14919 dtrace_dof_destroy(dof); 14920 14921 return (gen); 14922 } 14923 14924 static dtrace_helpers_t * 14925 dtrace_helpers_create(proc_t *p) 14926 { 14927 dtrace_helpers_t *help; 14928 14929 ASSERT(MUTEX_HELD(&dtrace_lock)); 14930 ASSERT(p->p_dtrace_helpers == NULL); 14931 14932 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14933 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14934 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14935 14936 p->p_dtrace_helpers = help; 14937 dtrace_helpers++; 14938 14939 return (help); 14940 } 14941 14942 #if defined(sun) 14943 static 14944 #endif 14945 void 14946 dtrace_helpers_destroy(proc_t *p) 14947 { 14948 dtrace_helpers_t *help; 14949 dtrace_vstate_t *vstate; 14950 #if defined(sun) 14951 proc_t *p = curproc; 14952 #endif 14953 int i; 14954 14955 mutex_enter(&dtrace_lock); 14956 14957 ASSERT(p->p_dtrace_helpers != NULL); 14958 ASSERT(dtrace_helpers > 0); 14959 14960 help = p->p_dtrace_helpers; 14961 vstate = &help->dthps_vstate; 14962 14963 /* 14964 * We're now going to lose the help from this process. 14965 */ 14966 p->p_dtrace_helpers = NULL; 14967 dtrace_sync(); 14968 14969 /* 14970 * Destory the helper actions. 14971 */ 14972 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14973 dtrace_helper_action_t *h, *next; 14974 14975 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14976 next = h->dtha_next; 14977 dtrace_helper_action_destroy(h, vstate); 14978 h = next; 14979 } 14980 } 14981 14982 mutex_exit(&dtrace_lock); 14983 14984 /* 14985 * Destroy the helper providers. 
14986 */ 14987 if (help->dthps_maxprovs > 0) { 14988 mutex_enter(&dtrace_meta_lock); 14989 if (dtrace_meta_pid != NULL) { 14990 ASSERT(dtrace_deferred_pid == NULL); 14991 14992 for (i = 0; i < help->dthps_nprovs; i++) { 14993 dtrace_helper_provider_remove( 14994 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14995 } 14996 } else { 14997 mutex_enter(&dtrace_lock); 14998 ASSERT(help->dthps_deferred == 0 || 14999 help->dthps_next != NULL || 15000 help->dthps_prev != NULL || 15001 help == dtrace_deferred_pid); 15002 15003 /* 15004 * Remove the helper from the deferred list. 15005 */ 15006 if (help->dthps_next != NULL) 15007 help->dthps_next->dthps_prev = help->dthps_prev; 15008 if (help->dthps_prev != NULL) 15009 help->dthps_prev->dthps_next = help->dthps_next; 15010 if (dtrace_deferred_pid == help) { 15011 dtrace_deferred_pid = help->dthps_next; 15012 ASSERT(help->dthps_prev == NULL); 15013 } 15014 15015 mutex_exit(&dtrace_lock); 15016 } 15017 15018 mutex_exit(&dtrace_meta_lock); 15019 15020 for (i = 0; i < help->dthps_nprovs; i++) { 15021 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15022 } 15023 15024 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15025 sizeof (dtrace_helper_provider_t *)); 15026 } 15027 15028 mutex_enter(&dtrace_lock); 15029 15030 dtrace_vstate_fini(&help->dthps_vstate); 15031 kmem_free(help->dthps_actions, 15032 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15033 kmem_free(help, sizeof (dtrace_helpers_t)); 15034 15035 --dtrace_helpers; 15036 mutex_exit(&dtrace_lock); 15037 } 15038 15039 #if defined(sun) 15040 static 15041 #endif 15042 void 15043 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15044 { 15045 dtrace_helpers_t *help, *newhelp; 15046 dtrace_helper_action_t *helper, *new, *last; 15047 dtrace_difo_t *dp; 15048 dtrace_vstate_t *vstate; 15049 int i, j, sz, hasprovs = 0; 15050 15051 mutex_enter(&dtrace_lock); 15052 ASSERT(from->p_dtrace_helpers != NULL); 15053 ASSERT(dtrace_helpers > 0); 15054 15055 help = from->p_dtrace_helpers; 15056 newhelp = dtrace_helpers_create(to); 15057 ASSERT(to->p_dtrace_helpers != NULL); 15058 15059 newhelp->dthps_generation = help->dthps_generation; 15060 vstate = &newhelp->dthps_vstate; 15061 15062 /* 15063 * Duplicate the helper actions. 15064 */ 15065 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15066 if ((helper = help->dthps_actions[i]) == NULL) 15067 continue; 15068 15069 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15070 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15071 KM_SLEEP); 15072 new->dtha_generation = helper->dtha_generation; 15073 15074 if ((dp = helper->dtha_predicate) != NULL) { 15075 dp = dtrace_difo_duplicate(dp, vstate); 15076 new->dtha_predicate = dp; 15077 } 15078 15079 new->dtha_nactions = helper->dtha_nactions; 15080 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15081 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15082 15083 for (j = 0; j < new->dtha_nactions; j++) { 15084 dtrace_difo_t *dp = helper->dtha_actions[j]; 15085 15086 ASSERT(dp != NULL); 15087 dp = dtrace_difo_duplicate(dp, vstate); 15088 new->dtha_actions[j] = dp; 15089 } 15090 15091 if (last != NULL) { 15092 last->dtha_next = new; 15093 } else { 15094 newhelp->dthps_actions[i] = new; 15095 } 15096 15097 last = new; 15098 } 15099 } 15100 15101 /* 15102 * Duplicate the helper providers and register them with the 15103 * DTrace framework. 
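 *
 * The provider descriptions themselves are shared rather than copied:
 * each dtrace_helper_provider_t is reference counted via dthp_ref, so
 * the lifetime across fork()/exit looks roughly like (a sketch):
 *
 *	fork:	child dthps_provs[i] = parent dthps_provs[i]; dthp_ref++
 *	exit:	dtrace_helper_provider_destroy() drops the reference and
 *		frees the underlying DOF only when dthp_ref reaches zero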
15104 */ 15105 if (help->dthps_nprovs > 0) { 15106 newhelp->dthps_nprovs = help->dthps_nprovs; 15107 newhelp->dthps_maxprovs = help->dthps_nprovs; 15108 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 15109 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15110 for (i = 0; i < newhelp->dthps_nprovs; i++) { 15111 newhelp->dthps_provs[i] = help->dthps_provs[i]; 15112 newhelp->dthps_provs[i]->dthp_ref++; 15113 } 15114 15115 hasprovs = 1; 15116 } 15117 15118 mutex_exit(&dtrace_lock); 15119 15120 if (hasprovs) 15121 dtrace_helper_provider_register(to, newhelp, NULL); 15122 } 15123 15124 #if defined(sun) 15125 /* 15126 * DTrace Hook Functions 15127 */ 15128 static void 15129 dtrace_module_loaded(modctl_t *ctl) 15130 { 15131 dtrace_provider_t *prv; 15132 15133 mutex_enter(&dtrace_provider_lock); 15134 mutex_enter(&mod_lock); 15135 15136 ASSERT(ctl->mod_busy); 15137 15138 /* 15139 * We're going to call each providers per-module provide operation 15140 * specifying only this module. 15141 */ 15142 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 15143 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 15144 15145 mutex_exit(&mod_lock); 15146 mutex_exit(&dtrace_provider_lock); 15147 15148 /* 15149 * If we have any retained enablings, we need to match against them. 15150 * Enabling probes requires that cpu_lock be held, and we cannot hold 15151 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 15152 * module. (In particular, this happens when loading scheduling 15153 * classes.) So if we have any retained enablings, we need to dispatch 15154 * our task queue to do the match for us. 15155 */ 15156 mutex_enter(&dtrace_lock); 15157 15158 if (dtrace_retained == NULL) { 15159 mutex_exit(&dtrace_lock); 15160 return; 15161 } 15162 15163 (void) taskq_dispatch(dtrace_taskq, 15164 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15165 15166 mutex_exit(&dtrace_lock); 15167 15168 /* 15169 * And now, for a little heuristic sleaze: in general, we want to 15170 * match modules as soon as they load. However, we cannot guarantee 15171 * this, because it would lead us to the lock ordering violation 15172 * outlined above. The common case, of course, is that cpu_lock is 15173 * _not_ held -- so we delay here for a clock tick, hoping that that's 15174 * long enough for the task queue to do its work. If it's not, it's 15175 * not a serious problem -- it just means that the module that we 15176 * just loaded may not be immediately instrumentable. 15177 */ 15178 delay(1); 15179 } 15180 15181 static void 15182 dtrace_module_unloaded(modctl_t *ctl) 15183 { 15184 dtrace_probe_t template, *probe, *first, *next; 15185 dtrace_provider_t *prov; 15186 15187 template.dtpr_mod = ctl->mod_modname; 15188 15189 mutex_enter(&dtrace_provider_lock); 15190 mutex_enter(&mod_lock); 15191 mutex_enter(&dtrace_lock); 15192 15193 if (dtrace_bymod == NULL) { 15194 /* 15195 * The DTrace module is loaded (obviously) but not attached; 15196 * we don't have any work to do. 15197 */ 15198 mutex_exit(&dtrace_provider_lock); 15199 mutex_exit(&mod_lock); 15200 mutex_exit(&dtrace_lock); 15201 return; 15202 } 15203 15204 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15205 probe != NULL; probe = probe->dtpr_nextmod) { 15206 if (probe->dtpr_ecb != NULL) { 15207 mutex_exit(&dtrace_provider_lock); 15208 mutex_exit(&mod_lock); 15209 mutex_exit(&dtrace_lock); 15210 15211 /* 15212 * This shouldn't _actually_ be possible -- we're 15213 * unloading a module that has an enabled probe in it. 
15214 * (It's normally up to the provider to make sure that 15215 * this can't happen.) However, because dtps_enable() 15216 * doesn't have a failure mode, there can be an 15217 * enable/unload race. Upshot: we don't want to 15218 * assert, but we're not going to disable the 15219 * probe, either. 15220 */ 15221 if (dtrace_err_verbose) { 15222 cmn_err(CE_WARN, "unloaded module '%s' had " 15223 "enabled probes", ctl->mod_modname); 15224 } 15225 15226 return; 15227 } 15228 } 15229 15230 probe = first; 15231 15232 for (first = NULL; probe != NULL; probe = next) { 15233 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15234 15235 dtrace_probes[probe->dtpr_id - 1] = NULL; 15236 15237 next = probe->dtpr_nextmod; 15238 dtrace_hash_remove(dtrace_bymod, probe); 15239 dtrace_hash_remove(dtrace_byfunc, probe); 15240 dtrace_hash_remove(dtrace_byname, probe); 15241 15242 if (first == NULL) { 15243 first = probe; 15244 probe->dtpr_nextmod = NULL; 15245 } else { 15246 probe->dtpr_nextmod = first; 15247 first = probe; 15248 } 15249 } 15250 15251 /* 15252 * We've removed all of the module's probes from the hash chains and 15253 * from the probe array. Now issue a dtrace_sync() to be sure that 15254 * everyone has cleared out from any probe array processing. 15255 */ 15256 dtrace_sync(); 15257 15258 for (probe = first; probe != NULL; probe = first) { 15259 first = probe->dtpr_nextmod; 15260 prov = probe->dtpr_provider; 15261 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15262 probe->dtpr_arg); 15263 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15264 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15265 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15266 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15267 kmem_free(probe, sizeof (dtrace_probe_t)); 15268 } 15269 15270 mutex_exit(&dtrace_lock); 15271 mutex_exit(&mod_lock); 15272 mutex_exit(&dtrace_provider_lock); 15273 } 15274 15275 static void 15276 dtrace_suspend(void) 15277 { 15278 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15279 } 15280 15281 static void 15282 dtrace_resume(void) 15283 { 15284 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15285 } 15286 #endif 15287 15288 static int 15289 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15290 { 15291 ASSERT(MUTEX_HELD(&cpu_lock)); 15292 mutex_enter(&dtrace_lock); 15293 15294 switch (what) { 15295 case CPU_CONFIG: { 15296 dtrace_state_t *state; 15297 dtrace_optval_t *opt, rs, c; 15298 15299 /* 15300 * For now, we only allocate a new buffer for anonymous state. 15301 */ 15302 if ((state = dtrace_anon.dta_state) == NULL) 15303 break; 15304 15305 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15306 break; 15307 15308 opt = state->dts_options; 15309 c = opt[DTRACEOPT_CPU]; 15310 15311 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15312 break; 15313 15314 /* 15315 * Regardless of what the actual policy is, we're going to 15316 * temporarily set our resize policy to be manual. We're 15317 * also going to temporarily set our CPU option to denote 15318 * the newly configured CPU. 15319 */ 15320 rs = opt[DTRACEOPT_BUFRESIZE]; 15321 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15322 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15323 15324 (void) dtrace_state_buffers(state); 15325 15326 opt[DTRACEOPT_BUFRESIZE] = rs; 15327 opt[DTRACEOPT_CPU] = c; 15328 15329 break; 15330 } 15331 15332 case CPU_UNCONFIG: 15333 /* 15334 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 15335 * buffer will be freed when the consumer exits.) 15336 */ 15337 break; 15338 15339 default: 15340 break; 15341 } 15342 15343 mutex_exit(&dtrace_lock); 15344 return (0); 15345 } 15346 15347 #if defined(sun) 15348 static void 15349 dtrace_cpu_setup_initial(processorid_t cpu) 15350 { 15351 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15352 } 15353 #endif 15354 15355 static void 15356 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15357 { 15358 if (dtrace_toxranges >= dtrace_toxranges_max) { 15359 int osize, nsize; 15360 dtrace_toxrange_t *range; 15361 15362 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15363 15364 if (osize == 0) { 15365 ASSERT(dtrace_toxrange == NULL); 15366 ASSERT(dtrace_toxranges_max == 0); 15367 dtrace_toxranges_max = 1; 15368 } else { 15369 dtrace_toxranges_max <<= 1; 15370 } 15371 15372 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15373 range = kmem_zalloc(nsize, KM_SLEEP); 15374 15375 if (dtrace_toxrange != NULL) { 15376 ASSERT(osize != 0); 15377 bcopy(dtrace_toxrange, range, osize); 15378 kmem_free(dtrace_toxrange, osize); 15379 } 15380 15381 dtrace_toxrange = range; 15382 } 15383 15384 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15385 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15386 15387 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15388 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15389 dtrace_toxranges++; 15390 } 15391 15392 /* 15393 * DTrace Driver Cookbook Functions 15394 */ 15395 #if defined(sun) 15396 /*ARGSUSED*/ 15397 static int 15398 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15399 { 15400 dtrace_provider_id_t id; 15401 dtrace_state_t *state = NULL; 15402 dtrace_enabling_t *enab; 15403 15404 mutex_enter(&cpu_lock); 15405 mutex_enter(&dtrace_provider_lock); 15406 mutex_enter(&dtrace_lock); 15407 15408 if (ddi_soft_state_init(&dtrace_softstate, 15409 sizeof (dtrace_state_t), 0) != 0) { 15410 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15411 mutex_exit(&cpu_lock); 15412 mutex_exit(&dtrace_provider_lock); 15413 mutex_exit(&dtrace_lock); 15414 return (DDI_FAILURE); 15415 } 15416 15417 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15418 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15419 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15420 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15421 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15422 ddi_remove_minor_node(devi, NULL); 15423 ddi_soft_state_fini(&dtrace_softstate); 15424 mutex_exit(&cpu_lock); 15425 mutex_exit(&dtrace_provider_lock); 15426 mutex_exit(&dtrace_lock); 15427 return (DDI_FAILURE); 15428 } 15429 15430 ddi_report_dev(devi); 15431 dtrace_devi = devi; 15432 15433 dtrace_modload = dtrace_module_loaded; 15434 dtrace_modunload = dtrace_module_unloaded; 15435 dtrace_cpu_init = dtrace_cpu_setup_initial; 15436 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15437 dtrace_helpers_fork = dtrace_helpers_duplicate; 15438 dtrace_cpustart_init = dtrace_suspend; 15439 dtrace_cpustart_fini = dtrace_resume; 15440 dtrace_debugger_init = dtrace_suspend; 15441 dtrace_debugger_fini = dtrace_resume; 15442 15443 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15444 15445 ASSERT(MUTEX_HELD(&cpu_lock)); 15446 15447 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15448 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15449 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15450 UINT32_MAX - 
DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15451 VM_SLEEP | VMC_IDENTIFIER); 15452 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15453 1, INT_MAX, 0); 15454 15455 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15456 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15457 NULL, NULL, NULL, NULL, NULL, 0); 15458 15459 ASSERT(MUTEX_HELD(&cpu_lock)); 15460 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15461 offsetof(dtrace_probe_t, dtpr_nextmod), 15462 offsetof(dtrace_probe_t, dtpr_prevmod)); 15463 15464 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15465 offsetof(dtrace_probe_t, dtpr_nextfunc), 15466 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15467 15468 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15469 offsetof(dtrace_probe_t, dtpr_nextname), 15470 offsetof(dtrace_probe_t, dtpr_prevname)); 15471 15472 if (dtrace_retain_max < 1) { 15473 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15474 "setting to 1", dtrace_retain_max); 15475 dtrace_retain_max = 1; 15476 } 15477 15478 /* 15479 * Now discover our toxic ranges. 15480 */ 15481 dtrace_toxic_ranges(dtrace_toxrange_add); 15482 15483 /* 15484 * Before we register ourselves as a provider to our own framework, 15485 * we would like to assert that dtrace_provider is NULL -- but that's 15486 * not true if we were loaded as a dependency of a DTrace provider. 15487 * Once we've registered, we can assert that dtrace_provider is our 15488 * pseudo provider. 15489 */ 15490 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15491 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15492 15493 ASSERT(dtrace_provider != NULL); 15494 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15495 15496 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15497 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15498 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15499 dtrace_provider, NULL, NULL, "END", 0, NULL); 15500 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15501 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15502 15503 dtrace_anon_property(); 15504 mutex_exit(&cpu_lock); 15505 15506 /* 15507 * If DTrace helper tracing is enabled, we need to allocate the 15508 * trace buffer and initialize the values. 15509 */ 15510 if (dtrace_helptrace_enabled) { 15511 ASSERT(dtrace_helptrace_buffer == NULL); 15512 dtrace_helptrace_buffer = 15513 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15514 dtrace_helptrace_next = 0; 15515 } 15516 15517 /* 15518 * If there are already providers, we must ask them to provide their 15519 * probes, and then match any anonymous enabling against them. Note 15520 * that there should be no other retained enablings at this time: 15521 * the only retained enablings at this time should be the anonymous 15522 * enabling. 15523 */ 15524 if (dtrace_anon.dta_enabling != NULL) { 15525 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15526 15527 dtrace_enabling_provide(NULL); 15528 state = dtrace_anon.dta_state; 15529 15530 /* 15531 * We couldn't hold cpu_lock across the above call to 15532 * dtrace_enabling_provide(), but we must hold it to actually 15533 * enable the probes. We have to drop all of our locks, pick 15534 * up cpu_lock, and regain our locks before matching the 15535 * retained anonymous enabling. 
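 *
 * The lock ordering being preserved is the same one used at the top of
 * this function:
 *
 *	cpu_lock -> dtrace_provider_lock -> dtrace_lock
 *
 * which is why both of our locks are dropped before cpu_lock is
 * acquired and are then re-taken, in order, below.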
15536 */ 15537 mutex_exit(&dtrace_lock); 15538 mutex_exit(&dtrace_provider_lock); 15539 15540 mutex_enter(&cpu_lock); 15541 mutex_enter(&dtrace_provider_lock); 15542 mutex_enter(&dtrace_lock); 15543 15544 if ((enab = dtrace_anon.dta_enabling) != NULL) 15545 (void) dtrace_enabling_match(enab, NULL); 15546 15547 mutex_exit(&cpu_lock); 15548 } 15549 15550 mutex_exit(&dtrace_lock); 15551 mutex_exit(&dtrace_provider_lock); 15552 15553 if (state != NULL) { 15554 /* 15555 * If we created any anonymous state, set it going now. 15556 */ 15557 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15558 } 15559 15560 return (DDI_SUCCESS); 15561 } 15562 #endif 15563 15564 #if !defined(sun) 15565 #if __FreeBSD_version >= 800039 15566 static void dtrace_dtr(void *); 15567 #endif 15568 #endif 15569 15570 /*ARGSUSED*/ 15571 static int 15572 #if defined(sun) 15573 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15574 #else 15575 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15576 #endif 15577 { 15578 dtrace_state_t *state; 15579 uint32_t priv; 15580 uid_t uid; 15581 zoneid_t zoneid; 15582 15583 #if defined(sun) 15584 if (getminor(*devp) == DTRACEMNRN_HELPER) 15585 return (0); 15586 15587 /* 15588 * If this wasn't an open with the "helper" minor, then it must be 15589 * the "dtrace" minor. 15590 */ 15591 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15592 #else 15593 cred_t *cred_p = NULL; 15594 15595 #if __FreeBSD_version < 800039 15596 /* 15597 * The first minor device is the one that is cloned so there is 15598 * nothing more to do here. 15599 */ 15600 if (dev2unit(dev) == 0) 15601 return 0; 15602 15603 /* 15604 * Devices are cloned, so if the DTrace state has already 15605 * been allocated, that means this device belongs to a 15606 * different client. Each client should open '/dev/dtrace' 15607 * to get a cloned device. 15608 */ 15609 if (dev->si_drv1 != NULL) 15610 return (EBUSY); 15611 #endif 15612 15613 cred_p = dev->si_cred; 15614 #endif 15615 15616 /* 15617 * If no DTRACE_PRIV_* bits are set in the credential, then the 15618 * caller lacks sufficient permission to do anything with DTrace. 15619 */ 15620 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15621 if (priv == DTRACE_PRIV_NONE) { 15622 #if !defined(sun) 15623 #if __FreeBSD_version < 800039 15624 /* Destroy the cloned device. */ 15625 destroy_dev(dev); 15626 #endif 15627 #endif 15628 15629 return (EACCES); 15630 } 15631 15632 /* 15633 * Ask all providers to provide all their probes. 15634 */ 15635 mutex_enter(&dtrace_provider_lock); 15636 dtrace_probe_provide(NULL, NULL); 15637 mutex_exit(&dtrace_provider_lock); 15638 15639 mutex_enter(&cpu_lock); 15640 mutex_enter(&dtrace_lock); 15641 dtrace_opens++; 15642 dtrace_membar_producer(); 15643 15644 #if defined(sun) 15645 /* 15646 * If the kernel debugger is active (that is, if the kernel debugger 15647 * modified text in some way), we won't allow the open. 
15648 */ 15649 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15650 dtrace_opens--; 15651 mutex_exit(&cpu_lock); 15652 mutex_exit(&dtrace_lock); 15653 return (EBUSY); 15654 } 15655 15656 state = dtrace_state_create(devp, cred_p); 15657 #else 15658 state = dtrace_state_create(dev); 15659 #if __FreeBSD_version < 800039 15660 dev->si_drv1 = state; 15661 #else 15662 devfs_set_cdevpriv(state, dtrace_dtr); 15663 #endif 15664 /* This code actually belongs in dtrace_attach() */ 15665 if (dtrace_opens == 1) 15666 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15667 1, INT_MAX, 0); 15668 #endif 15669 15670 mutex_exit(&cpu_lock); 15671 15672 if (state == NULL) { 15673 #if defined(sun) 15674 if (--dtrace_opens == 0) 15675 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15676 #else 15677 --dtrace_opens; 15678 #endif 15679 mutex_exit(&dtrace_lock); 15680 #if !defined(sun) 15681 #if __FreeBSD_version < 800039 15682 /* Destroy the cloned device. */ 15683 destroy_dev(dev); 15684 #endif 15685 #endif 15686 return (EAGAIN); 15687 } 15688 15689 mutex_exit(&dtrace_lock); 15690 15691 return (0); 15692 } 15693 15694 /*ARGSUSED*/ 15695 #if defined(sun) 15696 static int 15697 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15698 #elif __FreeBSD_version < 800039 15699 static int 15700 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15701 #else 15702 static void 15703 dtrace_dtr(void *data) 15704 #endif 15705 { 15706 #if defined(sun) 15707 minor_t minor = getminor(dev); 15708 dtrace_state_t *state; 15709 15710 if (minor == DTRACEMNRN_HELPER) 15711 return (0); 15712 15713 state = ddi_get_soft_state(dtrace_softstate, minor); 15714 #else 15715 #if __FreeBSD_version < 800039 15716 dtrace_state_t *state = dev->si_drv1; 15717 15718 /* Check if this is not a cloned device. */ 15719 if (dev2unit(dev) == 0) 15720 return (0); 15721 #else 15722 dtrace_state_t *state = data; 15723 #endif 15724 15725 #endif 15726 15727 mutex_enter(&cpu_lock); 15728 mutex_enter(&dtrace_lock); 15729 15730 if (state != NULL) { 15731 if (state->dts_anon) { 15732 /* 15733 * There is anonymous state. Destroy that first. 15734 */ 15735 ASSERT(dtrace_anon.dta_state == NULL); 15736 dtrace_state_destroy(state->dts_anon); 15737 } 15738 15739 dtrace_state_destroy(state); 15740 15741 #if !defined(sun) 15742 kmem_free(state, 0); 15743 #if __FreeBSD_version < 800039 15744 dev->si_drv1 = NULL; 15745 #endif 15746 #endif 15747 } 15748 15749 ASSERT(dtrace_opens > 0); 15750 #if defined(sun) 15751 if (--dtrace_opens == 0) 15752 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15753 #else 15754 --dtrace_opens; 15755 /* This code actually belongs in dtrace_detach() */ 15756 if ((dtrace_opens == 0) && (dtrace_taskq != NULL)) { 15757 taskq_destroy(dtrace_taskq); 15758 dtrace_taskq = NULL; 15759 } 15760 #endif 15761 15762 mutex_exit(&dtrace_lock); 15763 mutex_exit(&cpu_lock); 15764 15765 #if __FreeBSD_version < 800039 15766 /* Schedule this cloned device to be destroyed. 
*/ 15767 destroy_dev_sched(dev); 15768 #endif 15769 15770 #if defined(sun) || __FreeBSD_version < 800039 15771 return (0); 15772 #endif 15773 } 15774 15775 #if defined(sun) 15776 /*ARGSUSED*/ 15777 static int 15778 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15779 { 15780 int rval; 15781 dof_helper_t help, *dhp = NULL; 15782 15783 switch (cmd) { 15784 case DTRACEHIOC_ADDDOF: 15785 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15786 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15787 return (EFAULT); 15788 } 15789 15790 dhp = &help; 15791 arg = (intptr_t)help.dofhp_dof; 15792 /*FALLTHROUGH*/ 15793 15794 case DTRACEHIOC_ADD: { 15795 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15796 15797 if (dof == NULL) 15798 return (rval); 15799 15800 mutex_enter(&dtrace_lock); 15801 15802 /* 15803 * dtrace_helper_slurp() takes responsibility for the dof -- 15804 * it may free it now or it may save it and free it later. 15805 */ 15806 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15807 *rv = rval; 15808 rval = 0; 15809 } else { 15810 rval = EINVAL; 15811 } 15812 15813 mutex_exit(&dtrace_lock); 15814 return (rval); 15815 } 15816 15817 case DTRACEHIOC_REMOVE: { 15818 mutex_enter(&dtrace_lock); 15819 rval = dtrace_helper_destroygen(arg); 15820 mutex_exit(&dtrace_lock); 15821 15822 return (rval); 15823 } 15824 15825 default: 15826 break; 15827 } 15828 15829 return (ENOTTY); 15830 } 15831 15832 /*ARGSUSED*/ 15833 static int 15834 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15835 { 15836 minor_t minor = getminor(dev); 15837 dtrace_state_t *state; 15838 int rval; 15839 15840 if (minor == DTRACEMNRN_HELPER) 15841 return (dtrace_ioctl_helper(cmd, arg, rv)); 15842 15843 state = ddi_get_soft_state(dtrace_softstate, minor); 15844 15845 if (state->dts_anon) { 15846 ASSERT(dtrace_anon.dta_state == NULL); 15847 state = state->dts_anon; 15848 } 15849 15850 switch (cmd) { 15851 case DTRACEIOC_PROVIDER: { 15852 dtrace_providerdesc_t pvd; 15853 dtrace_provider_t *pvp; 15854 15855 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15856 return (EFAULT); 15857 15858 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15859 mutex_enter(&dtrace_provider_lock); 15860 15861 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15862 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15863 break; 15864 } 15865 15866 mutex_exit(&dtrace_provider_lock); 15867 15868 if (pvp == NULL) 15869 return (ESRCH); 15870 15871 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15872 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15873 15874 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15875 return (EFAULT); 15876 15877 return (0); 15878 } 15879 15880 case DTRACEIOC_EPROBE: { 15881 dtrace_eprobedesc_t epdesc; 15882 dtrace_ecb_t *ecb; 15883 dtrace_action_t *act; 15884 void *buf; 15885 size_t size; 15886 uintptr_t dest; 15887 int nrecs; 15888 15889 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15890 return (EFAULT); 15891 15892 mutex_enter(&dtrace_lock); 15893 15894 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15895 mutex_exit(&dtrace_lock); 15896 return (EINVAL); 15897 } 15898 15899 if (ecb->dte_probe == NULL) { 15900 mutex_exit(&dtrace_lock); 15901 return (EINVAL); 15902 } 15903 15904 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15905 epdesc.dtepd_uarg = ecb->dte_uarg; 15906 epdesc.dtepd_size = ecb->dte_size; 15907 15908 nrecs = epdesc.dtepd_nrecs; 15909 epdesc.dtepd_nrecs = 0; 15910 for (act = ecb->dte_action; 
act != NULL; act = act->dta_next) { 15911 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15912 continue; 15913 15914 epdesc.dtepd_nrecs++; 15915 } 15916 15917 /* 15918 * Now that we have the size, we need to allocate a temporary 15919 * buffer in which to store the complete description. We need 15920 * the temporary buffer to be able to drop dtrace_lock() 15921 * across the copyout(), below. 15922 */ 15923 size = sizeof (dtrace_eprobedesc_t) + 15924 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15925 15926 buf = kmem_alloc(size, KM_SLEEP); 15927 dest = (uintptr_t)buf; 15928 15929 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15930 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15931 15932 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15933 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15934 continue; 15935 15936 if (nrecs-- == 0) 15937 break; 15938 15939 bcopy(&act->dta_rec, (void *)dest, 15940 sizeof (dtrace_recdesc_t)); 15941 dest += sizeof (dtrace_recdesc_t); 15942 } 15943 15944 mutex_exit(&dtrace_lock); 15945 15946 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15947 kmem_free(buf, size); 15948 return (EFAULT); 15949 } 15950 15951 kmem_free(buf, size); 15952 return (0); 15953 } 15954 15955 case DTRACEIOC_AGGDESC: { 15956 dtrace_aggdesc_t aggdesc; 15957 dtrace_action_t *act; 15958 dtrace_aggregation_t *agg; 15959 int nrecs; 15960 uint32_t offs; 15961 dtrace_recdesc_t *lrec; 15962 void *buf; 15963 size_t size; 15964 uintptr_t dest; 15965 15966 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15967 return (EFAULT); 15968 15969 mutex_enter(&dtrace_lock); 15970 15971 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15972 mutex_exit(&dtrace_lock); 15973 return (EINVAL); 15974 } 15975 15976 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15977 15978 nrecs = aggdesc.dtagd_nrecs; 15979 aggdesc.dtagd_nrecs = 0; 15980 15981 offs = agg->dtag_base; 15982 lrec = &agg->dtag_action.dta_rec; 15983 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15984 15985 for (act = agg->dtag_first; ; act = act->dta_next) { 15986 ASSERT(act->dta_intuple || 15987 DTRACEACT_ISAGG(act->dta_kind)); 15988 15989 /* 15990 * If this action has a record size of zero, it 15991 * denotes an argument to the aggregating action. 15992 * Because the presence of this record doesn't (or 15993 * shouldn't) affect the way the data is interpreted, 15994 * we don't copy it out to save user-level the 15995 * confusion of dealing with a zero-length record. 15996 */ 15997 if (act->dta_rec.dtrd_size == 0) { 15998 ASSERT(agg->dtag_hasarg); 15999 continue; 16000 } 16001 16002 aggdesc.dtagd_nrecs++; 16003 16004 if (act == &agg->dtag_action) 16005 break; 16006 } 16007 16008 /* 16009 * Now that we have the size, we need to allocate a temporary 16010 * buffer in which to store the complete description. We need 16011 * the temporary buffer to be able to drop dtrace_lock() 16012 * across the copyout(), below. 16013 */ 16014 size = sizeof (dtrace_aggdesc_t) + 16015 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16016 16017 buf = kmem_alloc(size, KM_SLEEP); 16018 dest = (uintptr_t)buf; 16019 16020 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16021 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16022 16023 for (act = agg->dtag_first; ; act = act->dta_next) { 16024 dtrace_recdesc_t rec = act->dta_rec; 16025 16026 /* 16027 * See the comment in the above loop for why we pass 16028 * over zero-length records. 
16029 */ 16030 if (rec.dtrd_size == 0) { 16031 ASSERT(agg->dtag_hasarg); 16032 continue; 16033 } 16034 16035 if (nrecs-- == 0) 16036 break; 16037 16038 rec.dtrd_offset -= offs; 16039 bcopy(&rec, (void *)dest, sizeof (rec)); 16040 dest += sizeof (dtrace_recdesc_t); 16041 16042 if (act == &agg->dtag_action) 16043 break; 16044 } 16045 16046 mutex_exit(&dtrace_lock); 16047 16048 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16049 kmem_free(buf, size); 16050 return (EFAULT); 16051 } 16052 16053 kmem_free(buf, size); 16054 return (0); 16055 } 16056 16057 case DTRACEIOC_ENABLE: { 16058 dof_hdr_t *dof; 16059 dtrace_enabling_t *enab = NULL; 16060 dtrace_vstate_t *vstate; 16061 int err = 0; 16062 16063 *rv = 0; 16064 16065 /* 16066 * If a NULL argument has been passed, we take this as our 16067 * cue to reevaluate our enablings. 16068 */ 16069 if (arg == NULL) { 16070 dtrace_enabling_matchall(); 16071 16072 return (0); 16073 } 16074 16075 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 16076 return (rval); 16077 16078 mutex_enter(&cpu_lock); 16079 mutex_enter(&dtrace_lock); 16080 vstate = &state->dts_vstate; 16081 16082 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 16083 mutex_exit(&dtrace_lock); 16084 mutex_exit(&cpu_lock); 16085 dtrace_dof_destroy(dof); 16086 return (EBUSY); 16087 } 16088 16089 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 16090 mutex_exit(&dtrace_lock); 16091 mutex_exit(&cpu_lock); 16092 dtrace_dof_destroy(dof); 16093 return (EINVAL); 16094 } 16095 16096 if ((rval = dtrace_dof_options(dof, state)) != 0) { 16097 dtrace_enabling_destroy(enab); 16098 mutex_exit(&dtrace_lock); 16099 mutex_exit(&cpu_lock); 16100 dtrace_dof_destroy(dof); 16101 return (rval); 16102 } 16103 16104 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 16105 err = dtrace_enabling_retain(enab); 16106 } else { 16107 dtrace_enabling_destroy(enab); 16108 } 16109 16110 mutex_exit(&cpu_lock); 16111 mutex_exit(&dtrace_lock); 16112 dtrace_dof_destroy(dof); 16113 16114 return (err); 16115 } 16116 16117 case DTRACEIOC_REPLICATE: { 16118 dtrace_repldesc_t desc; 16119 dtrace_probedesc_t *match = &desc.dtrpd_match; 16120 dtrace_probedesc_t *create = &desc.dtrpd_create; 16121 int err; 16122 16123 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16124 return (EFAULT); 16125 16126 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16127 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16128 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16129 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16130 16131 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16132 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16133 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16134 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16135 16136 mutex_enter(&dtrace_lock); 16137 err = dtrace_enabling_replicate(state, match, create); 16138 mutex_exit(&dtrace_lock); 16139 16140 return (err); 16141 } 16142 16143 case DTRACEIOC_PROBEMATCH: 16144 case DTRACEIOC_PROBES: { 16145 dtrace_probe_t *probe = NULL; 16146 dtrace_probedesc_t desc; 16147 dtrace_probekey_t pkey; 16148 dtrace_id_t i; 16149 int m = 0; 16150 uint32_t priv; 16151 uid_t uid; 16152 zoneid_t zoneid; 16153 16154 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16155 return (EFAULT); 16156 16157 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16158 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16159 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16160 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16161 16162 /* 16163 * Before we attempt to 
match this probe, we want to give 16164 * all providers the opportunity to provide it. 16165 */ 16166 if (desc.dtpd_id == DTRACE_IDNONE) { 16167 mutex_enter(&dtrace_provider_lock); 16168 dtrace_probe_provide(&desc, NULL); 16169 mutex_exit(&dtrace_provider_lock); 16170 desc.dtpd_id++; 16171 } 16172 16173 if (cmd == DTRACEIOC_PROBEMATCH) { 16174 dtrace_probekey(&desc, &pkey); 16175 pkey.dtpk_id = DTRACE_IDNONE; 16176 } 16177 16178 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 16179 16180 mutex_enter(&dtrace_lock); 16181 16182 if (cmd == DTRACEIOC_PROBEMATCH) { 16183 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16184 if ((probe = dtrace_probes[i - 1]) != NULL && 16185 (m = dtrace_match_probe(probe, &pkey, 16186 priv, uid, zoneid)) != 0) 16187 break; 16188 } 16189 16190 if (m < 0) { 16191 mutex_exit(&dtrace_lock); 16192 return (EINVAL); 16193 } 16194 16195 } else { 16196 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16197 if ((probe = dtrace_probes[i - 1]) != NULL && 16198 dtrace_match_priv(probe, priv, uid, zoneid)) 16199 break; 16200 } 16201 } 16202 16203 if (probe == NULL) { 16204 mutex_exit(&dtrace_lock); 16205 return (ESRCH); 16206 } 16207 16208 dtrace_probe_description(probe, &desc); 16209 mutex_exit(&dtrace_lock); 16210 16211 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16212 return (EFAULT); 16213 16214 return (0); 16215 } 16216 16217 case DTRACEIOC_PROBEARG: { 16218 dtrace_argdesc_t desc; 16219 dtrace_probe_t *probe; 16220 dtrace_provider_t *prov; 16221 16222 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16223 return (EFAULT); 16224 16225 if (desc.dtargd_id == DTRACE_IDNONE) 16226 return (EINVAL); 16227 16228 if (desc.dtargd_ndx == DTRACE_ARGNONE) 16229 return (EINVAL); 16230 16231 mutex_enter(&dtrace_provider_lock); 16232 mutex_enter(&mod_lock); 16233 mutex_enter(&dtrace_lock); 16234 16235 if (desc.dtargd_id > dtrace_nprobes) { 16236 mutex_exit(&dtrace_lock); 16237 mutex_exit(&mod_lock); 16238 mutex_exit(&dtrace_provider_lock); 16239 return (EINVAL); 16240 } 16241 16242 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16243 mutex_exit(&dtrace_lock); 16244 mutex_exit(&mod_lock); 16245 mutex_exit(&dtrace_provider_lock); 16246 return (EINVAL); 16247 } 16248 16249 mutex_exit(&dtrace_lock); 16250 16251 prov = probe->dtpr_provider; 16252 16253 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16254 /* 16255 * There isn't any typed information for this probe. 16256 * Set the argument number to DTRACE_ARGNONE. 
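 *
 * (For context: a consumer typically discovers typed arguments by
 * issuing DTRACEIOC_PROBEARG with increasing dtargd_ndx values until
 * DTRACE_ARGNONE comes back -- roughly, as a sketch:
 *
 *	for (ndx = 0; ; ndx++) {
 *		desc.dtargd_ndx = ndx;
 *		if (ioctl(fd, DTRACEIOC_PROBEARG, &desc) != 0 ||
 *		    desc.dtargd_ndx == DTRACE_ARGNONE)
 *			break;
 *	}
 *
 * so answering DTRACE_ARGNONE here cleanly terminates that walk.)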
	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}

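	/*
	 * DTRACEIOC_BUFSNAP and DTRACEIOC_AGGSNAP snapshot a per-CPU
	 * principal or aggregation buffer, respectively.  For switching
	 * buffers, the snapshot is taken by cross calling to the target CPU
	 * to switch the active and inactive buffers, then copying out the
	 * now-inactive buffer.  An illustrative (not authoritative) consumer
	 * sequence might look like:
	 *
	 *	dtrace_bufdesc_t desc;
	 *
	 *	desc.dtbd_cpu = cpu;
	 *	desc.dtbd_data = data;		(a buffer at least as large
	 *					as the configured buffer size)
	 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == 0)
	 *		consume(desc.dtbd_data, desc.dtbd_size);
	 *
	 * where consume() is a hypothetical consumer-side routine.
	 */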
	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

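	/*
	 * DTRACEIOC_CONF reports static configuration information:  the DIF
	 * version and the integer and tuple register file sizes supported by
	 * the in-kernel DIF emulator, along with the native CTF data model.
	 */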
	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

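	/*
	 * DTRACEIOC_FORMAT copies out the format string for the specified
	 * format index.  If the caller's buffer (dtfd_length) is too small,
	 * the required length is passed back in dtfd_length instead of the
	 * string itself; a consumer is therefore expected to call twice, as
	 * in this illustrative (not authoritative) sketch:
	 *
	 *	fmt.dtfd_format = format;
	 *	fmt.dtfd_length = 0;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
	 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);
	 */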
	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}

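/*
 * The detach path below is only compiled on Solaris (note the #endif that
 * follows it).  Detach is refused if any helpers remain or if the dtrace
 * provider itself cannot be unregistered; otherwise all of the global
 * framework state established at attach time is torn down.
 */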
/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

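/*
 * dtrace_info() is the Solaris getinfo(9E) entry point; it simply maps any
 * dtrace device back to the single dtrace dev_info_t (instance 0).
 */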
#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

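/*
 * Everything from here down is FreeBSD-specific module glue:  the control
 * and helper character devices, the ioctl, load, unload, sysctl and
 * anonymous-state support pulled in from the dtrace_*.c fragments included
 * below, and the SYSINIT/SYSUNINIT hooks that load and unload the framework
 * at the SI_SUB_DTRACE stage.
 */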
static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs	*dtrace_clones;		/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;			/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
#if __FreeBSD_version < 800039
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
#endif
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif