/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
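 *
 * (As a further illustration -- the value here is hypothetical, not a
 * recommendation -- the DOF size limit declared below could be raised with:
 *
 *	set dtrace:dtrace_dof_maxsize = 0x80000
 *
 * should the 256K default prove too small.)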
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
#else
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
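 *
 * Taken together, then, the overall lock acquisition order works out to:
 * dtrace_meta_lock, then cpu_lock, then dtrace_provider_lock, then
 * mod_lock, then dtrace_lock.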
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
static kmutex_t		mod_lock;

#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
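 * (For example, after a test run one might issue ::dtrace_errhash from
 * within "mdb -k" to confirm that the expected errors were in fact seen.)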
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
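 *
 * (An illustrative case, not from the original comment: with baseaddr =
 * 0x1000, basesz = 0x100, testaddr = 0xfff and testsz = 2, the first
 * clause fails -- (0xfff - 0x1000) wraps around to a huge unsigned value --
 * correctly rejecting a range that starts below the base.)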
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)	\
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
#if defined(sun)
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
#endif
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
#if defined(sun)
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
#endif
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
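 *
 * (As an illustrative check, not part of the original comment: with
 * factor1 = 0x100000002 and factor2 = 0x100000003, we have hi1 = hi2 = 1,
 * lo1 = 2 and lo2 = 3, so the result is (1 * 1) << 64 plus
 * (1 * 3 + 1 * 2) << 32 plus (2 * 3) -- that is, product[1] = 1 and
 * product[0] = 0x500000006.)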
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
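 * (Note that on FreeBSD, SNOCD is defined to 0 in the compatibility
 * definitions above, so the p_flag check below is effectively a no-op
 * there.)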
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
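				 * (Specifically, the "if (dvar == NULL)"
				 * test that follows this loop.)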
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
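	 *
	 * (As a hypothetical example: a single 5-byte by-reference key would
	 * contribute P2ROUNDUP(5, sizeof (uint64_t)) = 8 bytes to ksize,
	 * while a by-value key, whose dttk_size is 0, contributes nothing.)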
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
1778 */ 1779 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1780 ASSERT(rval == NULL); 1781 goto retry; 1782 } 1783 1784 dvar = free; 1785 new_free = dvar->dtdv_next; 1786 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1787 1788 /* 1789 * We have now allocated a new chunk. We copy the tuple keys into the 1790 * tuple array and copy any referenced key data into the data space 1791 * following the tuple array. As we do this, we relocate dttk_value 1792 * in the final tuple to point to the key data address in the chunk. 1793 */ 1794 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1795 dvar->dtdv_data = (void *)(kdata + ksize); 1796 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1797 1798 for (i = 0; i < nkeys; i++) { 1799 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1800 size_t kesize = key[i].dttk_size; 1801 1802 if (kesize != 0) { 1803 dtrace_bcopy( 1804 (const void *)(uintptr_t)key[i].dttk_value, 1805 (void *)kdata, kesize); 1806 dkey->dttk_value = kdata; 1807 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1808 } else { 1809 dkey->dttk_value = key[i].dttk_value; 1810 } 1811 1812 dkey->dttk_size = kesize; 1813 } 1814 1815 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1816 dvar->dtdv_hashval = hashval; 1817 dvar->dtdv_next = start; 1818 1819 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1820 return (dvar); 1821 1822 /* 1823 * The cas has failed. Either another CPU is adding an element to 1824 * this hash chain, or another CPU is deleting an element from this 1825 * hash chain. The simplest way to deal with both of these cases 1826 * (though not necessarily the most efficient) is to free our 1827 * allocated block and tail-call ourselves. Note that the free is 1828 * to the dirty list and _not_ to the free list. This is to prevent 1829 * races with allocators, above. 
1830 */ 1831 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1832 1833 dtrace_membar_producer(); 1834 1835 do { 1836 free = dcpu->dtdsc_dirty; 1837 dvar->dtdv_next = free; 1838 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1839 1840 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1841 } 1842 1843 /*ARGSUSED*/ 1844 static void 1845 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1846 { 1847 if ((int64_t)nval < (int64_t)*oval) 1848 *oval = nval; 1849 } 1850 1851 /*ARGSUSED*/ 1852 static void 1853 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1854 { 1855 if ((int64_t)nval > (int64_t)*oval) 1856 *oval = nval; 1857 } 1858 1859 static void 1860 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1861 { 1862 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1863 int64_t val = (int64_t)nval; 1864 1865 if (val < 0) { 1866 for (i = 0; i < zero; i++) { 1867 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1868 quanta[i] += incr; 1869 return; 1870 } 1871 } 1872 } else { 1873 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1874 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1875 quanta[i - 1] += incr; 1876 return; 1877 } 1878 } 1879 1880 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1881 return; 1882 } 1883 1884 ASSERT(0); 1885 } 1886 1887 static void 1888 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1889 { 1890 uint64_t arg = *lquanta++; 1891 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1892 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1893 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1894 int32_t val = (int32_t)nval, level; 1895 1896 ASSERT(step != 0); 1897 ASSERT(levels != 0); 1898 1899 if (val < base) { 1900 /* 1901 * This is an underflow. 1902 */ 1903 lquanta[0] += incr; 1904 return; 1905 } 1906 1907 level = (val - base) / step; 1908 1909 if (level < levels) { 1910 lquanta[level + 1] += incr; 1911 return; 1912 } 1913 1914 /* 1915 * This is an overflow. 1916 */ 1917 lquanta[levels + 1] += incr; 1918 } 1919 1920 /*ARGSUSED*/ 1921 static void 1922 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1923 { 1924 data[0]++; 1925 data[1] += nval; 1926 } 1927 1928 /*ARGSUSED*/ 1929 static void 1930 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1931 { 1932 int64_t snval = (int64_t)nval; 1933 uint64_t tmp[2]; 1934 1935 data[0]++; 1936 data[1] += nval; 1937 1938 /* 1939 * What we want to say here is: 1940 * 1941 * data[2] += nval * nval; 1942 * 1943 * But given that nval is 64-bit, we could easily overflow, so 1944 * we do this as 128-bit arithmetic. 1945 */ 1946 if (snval < 0) 1947 snval = -snval; 1948 1949 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1950 dtrace_add_128(data + 2, tmp, data + 2); 1951 } 1952 1953 /*ARGSUSED*/ 1954 static void 1955 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1956 { 1957 *oval = *oval + 1; 1958 } 1959 1960 /*ARGSUSED*/ 1961 static void 1962 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1963 { 1964 *oval += nval; 1965 } 1966 1967 /* 1968 * Aggregate given the tuple in the principal data buffer, and the aggregating 1969 * action denoted by the specified dtrace_aggregation_t. The aggregation 1970 * buffer is specified as the buf parameter. This routine does not return 1971 * failure; if there is no space in the aggregation buffer, the data will be 1972 * dropped, and a corresponding counter incremented. 
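 *
 * For orientation (an illustrative example, not part of the original
 * comment), this is the path taken by D aggregations such as:
 *
 *	syscall:::entry { @counts[execname, probefunc] = count(); }
 *
 * Here the tuple (execname, probefunc) is formatted into the principal
 * buffer by the ECB machinery, and this routine hashes that tuple,
 * finds or creates the corresponding key in the aggregation buffer,
 * and applies dtrace_aggregate_count() to the stored value.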
1973  */
1974 static void
1975 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1976     intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1977 {
1978     dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1979     uint32_t i, ndx, size, fsize;
1980     uint32_t align = sizeof (uint64_t) - 1;
1981     dtrace_aggbuffer_t *agb;
1982     dtrace_aggkey_t *key;
1983     uint32_t hashval = 0, limit, isstr;
1984     caddr_t tomax, data, kdata;
1985     dtrace_actkind_t action;
1986     dtrace_action_t *act;
1987     uintptr_t offs;
1988 
1989     if (buf == NULL)
1990         return;
1991 
1992     if (!agg->dtag_hasarg) {
1993         /*
1994          * Currently, only quantize() and lquantize() take additional
1995          * arguments, and they have the same semantics: an increment
1996          * value that defaults to 1 when not present. If additional
1997          * aggregating actions take arguments, the setting of the
1998          * default argument value will presumably have to become more
1999          * sophisticated...
2000          */
2001         arg = 1;
2002     }
2003 
2004     action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2005     size = rec->dtrd_offset - agg->dtag_base;
2006     fsize = size + rec->dtrd_size;
2007 
2008     ASSERT(dbuf->dtb_tomax != NULL);
2009     data = dbuf->dtb_tomax + offset + agg->dtag_base;
2010 
2011     if ((tomax = buf->dtb_tomax) == NULL) {
2012         dtrace_buffer_drop(buf);
2013         return;
2014     }
2015 
2016     /*
2017      * The metastructure is always at the bottom of the buffer.
2018      */
2019     agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2020         sizeof (dtrace_aggbuffer_t));
2021 
2022     if (buf->dtb_offset == 0) {
2023         /*
2024          * We just kludge up approximately 1/8th of the size to be
2025          * buckets. If this guess ends up being routinely
2026          * off-the-mark, we may need to dynamically readjust this
2027          * based on past performance.
2028          */
2029         uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2030 
2031         if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2032             (uintptr_t)tomax || hashsize == 0) {
2033             /*
2034              * We've been given a ludicrously small buffer;
2035              * increment our drop count and leave.
2036              */
2037             dtrace_buffer_drop(buf);
2038             return;
2039         }
2040 
2041         /*
2042          * And now, a pathetic attempt to try to get an odd (or
2043          * perchance, a prime) hash size for better hash distribution.
2044          */
2045         if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2046             hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2047 
2048         agb->dtagb_hashsize = hashsize;
2049         agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2050             agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2051         agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2052 
2053         for (i = 0; i < agb->dtagb_hashsize; i++)
2054             agb->dtagb_hash[i] = NULL;
2055     }
2056 
2057     ASSERT(agg->dtag_first != NULL);
2058     ASSERT(agg->dtag_first->dta_intuple);
2059 
2060     /*
2061      * Calculate the hash value based on the key. Note that we _don't_
2062      * include the aggid in the hashing (but we will store it as part of
2063      * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2064      * algorithm: a simple, quick algorithm that has no known funnels, and
2065      * gets good distribution in practice. The efficacy of the hashing
2066      * algorithm (and a comparison with other algorithms) may be found by
2067      * running the ::dtrace_aggstat MDB dcmd.
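     *
     * For reference, the one-at-a-time hash in isolation (a sketch
     * equivalent to the loops below, assuming a simple byte key):
     *
     *	uint32_t
     *	jenkins_one_at_a_time(const uint8_t *key, size_t len)
     *	{
     *		uint32_t h = 0;
     *		size_t i;
     *
     *		for (i = 0; i < len; i++) {
     *			h += key[i];
     *			h += (h << 10);
     *			h ^= (h >> 6);
     *		}
     *		h += (h << 3);
     *		h ^= (h >> 11);
     *		h += (h << 15);
     *		return (h);
     *	}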
2068 */ 2069 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2070 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2071 limit = i + act->dta_rec.dtrd_size; 2072 ASSERT(limit <= size); 2073 isstr = DTRACEACT_ISSTRING(act); 2074 2075 for (; i < limit; i++) { 2076 hashval += data[i]; 2077 hashval += (hashval << 10); 2078 hashval ^= (hashval >> 6); 2079 2080 if (isstr && data[i] == '\0') 2081 break; 2082 } 2083 } 2084 2085 hashval += (hashval << 3); 2086 hashval ^= (hashval >> 11); 2087 hashval += (hashval << 15); 2088 2089 /* 2090 * Yes, the divide here is expensive -- but it's generally the least 2091 * of the performance issues given the amount of data that we iterate 2092 * over to compute hash values, compare data, etc. 2093 */ 2094 ndx = hashval % agb->dtagb_hashsize; 2095 2096 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2097 ASSERT((caddr_t)key >= tomax); 2098 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2099 2100 if (hashval != key->dtak_hashval || key->dtak_size != size) 2101 continue; 2102 2103 kdata = key->dtak_data; 2104 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2105 2106 for (act = agg->dtag_first; act->dta_intuple; 2107 act = act->dta_next) { 2108 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2109 limit = i + act->dta_rec.dtrd_size; 2110 ASSERT(limit <= size); 2111 isstr = DTRACEACT_ISSTRING(act); 2112 2113 for (; i < limit; i++) { 2114 if (kdata[i] != data[i]) 2115 goto next; 2116 2117 if (isstr && data[i] == '\0') 2118 break; 2119 } 2120 } 2121 2122 if (action != key->dtak_action) { 2123 /* 2124 * We are aggregating on the same value in the same 2125 * aggregation with two different aggregating actions. 2126 * (This should have been picked up in the compiler, 2127 * so we may be dealing with errant or devious DIF.) 2128 * This is an error condition; we indicate as much, 2129 * and return. 2130 */ 2131 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2132 return; 2133 } 2134 2135 /* 2136 * This is a hit: we need to apply the aggregator to 2137 * the value at this key. 2138 */ 2139 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2140 return; 2141 next: 2142 continue; 2143 } 2144 2145 /* 2146 * We didn't find it. We need to allocate some zero-filled space, 2147 * link it into the hash table appropriately, and apply the aggregator 2148 * to the (zero-filled) value. 2149 */ 2150 offs = buf->dtb_offset; 2151 while (offs & (align - 1)) 2152 offs += sizeof (uint32_t); 2153 2154 /* 2155 * If we don't have enough room to both allocate a new key _and_ 2156 * its associated data, increment the drop count and return. 2157 */ 2158 if ((uintptr_t)tomax + offs + fsize > 2159 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2160 dtrace_buffer_drop(buf); 2161 return; 2162 } 2163 2164 /*CONSTCOND*/ 2165 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2166 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2167 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2168 2169 key->dtak_data = kdata = tomax + offs; 2170 buf->dtb_offset = offs + fsize; 2171 2172 /* 2173 * Now copy the data across. 2174 */ 2175 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2176 2177 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2178 kdata[i] = data[i]; 2179 2180 /* 2181 * Because strings are not zeroed out by default, we need to iterate 2182 * looking for actions that store strings, and we need to explicitly 2183 * pad these strings out with zeroes. 
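     *
     * (To illustrate with a hypothetical example: without this padding, two
     * occurrences of the tuple key "foo" could be followed by different
     * garbage beyond their terminating NUL in the principal buffer, hash to
     * different buckets, and be wrongly treated as distinct keys. The loop
     * below zeroes everything after the first NUL so that equal strings are
     * bitwise-equal throughout their record.)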
2184 */ 2185 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2186 int nul; 2187 2188 if (!DTRACEACT_ISSTRING(act)) 2189 continue; 2190 2191 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2192 limit = i + act->dta_rec.dtrd_size; 2193 ASSERT(limit <= size); 2194 2195 for (nul = 0; i < limit; i++) { 2196 if (nul) { 2197 kdata[i] = '\0'; 2198 continue; 2199 } 2200 2201 if (data[i] != '\0') 2202 continue; 2203 2204 nul = 1; 2205 } 2206 } 2207 2208 for (i = size; i < fsize; i++) 2209 kdata[i] = 0; 2210 2211 key->dtak_hashval = hashval; 2212 key->dtak_size = size; 2213 key->dtak_action = action; 2214 key->dtak_next = agb->dtagb_hash[ndx]; 2215 agb->dtagb_hash[ndx] = key; 2216 2217 /* 2218 * Finally, apply the aggregator. 2219 */ 2220 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2221 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2222 } 2223 2224 /* 2225 * Given consumer state, this routine finds a speculation in the INACTIVE 2226 * state and transitions it into the ACTIVE state. If there is no speculation 2227 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2228 * incremented -- it is up to the caller to take appropriate action. 2229 */ 2230 static int 2231 dtrace_speculation(dtrace_state_t *state) 2232 { 2233 int i = 0; 2234 dtrace_speculation_state_t current; 2235 uint32_t *stat = &state->dts_speculations_unavail, count; 2236 2237 while (i < state->dts_nspeculations) { 2238 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2239 2240 current = spec->dtsp_state; 2241 2242 if (current != DTRACESPEC_INACTIVE) { 2243 if (current == DTRACESPEC_COMMITTINGMANY || 2244 current == DTRACESPEC_COMMITTING || 2245 current == DTRACESPEC_DISCARDING) 2246 stat = &state->dts_speculations_busy; 2247 i++; 2248 continue; 2249 } 2250 2251 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2252 current, DTRACESPEC_ACTIVE) == current) 2253 return (i + 1); 2254 } 2255 2256 /* 2257 * We couldn't find a speculation. If we found as much as a single 2258 * busy speculation buffer, we'll attribute this failure as "busy" 2259 * instead of "unavail". 2260 */ 2261 do { 2262 count = *stat; 2263 } while (dtrace_cas32(stat, count, count + 1) != count); 2264 2265 return (0); 2266 } 2267 2268 /* 2269 * This routine commits an active speculation. If the specified speculation 2270 * is not in a valid state to perform a commit(), this routine will silently do 2271 * nothing. 
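 *
 * (As an illustration -- this example is not part of the original
 * comment -- a D consumer typically drives this path as follows:
 *
 *	syscall::open:entry  { self->spec = speculation(); }
 *	syscall::open:entry  /self->spec/ { speculate(self->spec); ... }
 *	syscall::open:return /self->spec && errno != 0/
 *	    { commit(self->spec); self->spec = 0; }
 *
 * where the commit() action ultimately lands here.)
 *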
 * The state of the specified speculation is transitioned according
2272  * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2273  */
2274 static void
2275 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2276     dtrace_specid_t which)
2277 {
2278     dtrace_speculation_t *spec;
2279     dtrace_buffer_t *src, *dest;
2280     uintptr_t daddr, saddr, dlimit;
2281     dtrace_speculation_state_t current, new = 0;
2282     intptr_t offs;
2283 
2284     if (which == 0)
2285         return;
2286 
2287     if (which > state->dts_nspeculations) {
2288         cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2289         return;
2290     }
2291 
2292     spec = &state->dts_speculations[which - 1];
2293     src = &spec->dtsp_buffer[cpu];
2294     dest = &state->dts_buffer[cpu];
2295 
2296     do {
2297         current = spec->dtsp_state;
2298 
2299         if (current == DTRACESPEC_COMMITTINGMANY)
2300             break;
2301 
2302         switch (current) {
2303         case DTRACESPEC_INACTIVE:
2304         case DTRACESPEC_DISCARDING:
2305             return;
2306 
2307         case DTRACESPEC_COMMITTING:
2308             /*
2309              * This is only possible if we are (a) commit()'ing
2310              * without having done a prior speculate() on this CPU
2311              * and (b) racing with another commit() on a different
2312              * CPU. There's nothing to do -- we just assert that
2313              * our offset is 0.
2314              */
2315             ASSERT(src->dtb_offset == 0);
2316             return;
2317 
2318         case DTRACESPEC_ACTIVE:
2319             new = DTRACESPEC_COMMITTING;
2320             break;
2321 
2322         case DTRACESPEC_ACTIVEONE:
2323             /*
2324              * This speculation is active on one CPU. If our
2325              * buffer offset is non-zero, we know that the one CPU
2326              * must be us. Otherwise, we are committing on a
2327              * different CPU from the speculate(), and we must
2328              * rely on being asynchronously cleaned.
2329              */
2330             if (src->dtb_offset != 0) {
2331                 new = DTRACESPEC_COMMITTING;
2332                 break;
2333             }
2334             /*FALLTHROUGH*/
2335 
2336         case DTRACESPEC_ACTIVEMANY:
2337             new = DTRACESPEC_COMMITTINGMANY;
2338             break;
2339 
2340         default:
2341             ASSERT(0);
2342         }
2343     } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2344         current, new) != current);
2345 
2346     /*
2347      * We have set the state to indicate that we are committing this
2348      * speculation. Now reserve the necessary space in the destination
2349      * buffer.
2350      */
2351     if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2352         sizeof (uint64_t), state, NULL)) < 0) {
2353         dtrace_buffer_drop(dest);
2354         goto out;
2355     }
2356 
2357     /*
2358      * We have the space; copy the buffer across. (Note that this is a
2359      * highly suboptimal bcopy(); in the unlikely event that this becomes
2360      * a serious performance issue, a high-performance DTrace-specific
2361      * bcopy() should obviously be invented.)
2362      */
2363     daddr = (uintptr_t)dest->dtb_tomax + offs;
2364     dlimit = daddr + src->dtb_offset;
2365     saddr = (uintptr_t)src->dtb_tomax;
2366 
2367     /*
2368      * First, the aligned portion.
2369      */
2370     while (dlimit - daddr >= sizeof (uint64_t)) {
2371         *((uint64_t *)daddr) = *((uint64_t *)saddr);
2372 
2373         daddr += sizeof (uint64_t);
2374         saddr += sizeof (uint64_t);
2375     }
2376 
2377     /*
2378      * Now any left-over bit...
2379      */
2380     while (dlimit - daddr)
2381         *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2382 
2383     /*
2384      * Finally, commit the reserved space in the destination buffer.
2385      */
2386     dest->dtb_offset = offs + src->dtb_offset;
2387 
2388 out:
2389     /*
2390      * If we're lucky enough to be the only active CPU on this speculation
2391      * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2392 */ 2393 if (current == DTRACESPEC_ACTIVE || 2394 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2395 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2396 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2397 2398 ASSERT(rval == DTRACESPEC_COMMITTING); 2399 } 2400 2401 src->dtb_offset = 0; 2402 src->dtb_xamot_drops += src->dtb_drops; 2403 src->dtb_drops = 0; 2404 } 2405 2406 /* 2407 * This routine discards an active speculation. If the specified speculation 2408 * is not in a valid state to perform a discard(), this routine will silently 2409 * do nothing. The state of the specified speculation is transitioned 2410 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2411 */ 2412 static void 2413 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2414 dtrace_specid_t which) 2415 { 2416 dtrace_speculation_t *spec; 2417 dtrace_speculation_state_t current, new = 0; 2418 dtrace_buffer_t *buf; 2419 2420 if (which == 0) 2421 return; 2422 2423 if (which > state->dts_nspeculations) { 2424 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2425 return; 2426 } 2427 2428 spec = &state->dts_speculations[which - 1]; 2429 buf = &spec->dtsp_buffer[cpu]; 2430 2431 do { 2432 current = spec->dtsp_state; 2433 2434 switch (current) { 2435 case DTRACESPEC_INACTIVE: 2436 case DTRACESPEC_COMMITTINGMANY: 2437 case DTRACESPEC_COMMITTING: 2438 case DTRACESPEC_DISCARDING: 2439 return; 2440 2441 case DTRACESPEC_ACTIVE: 2442 case DTRACESPEC_ACTIVEMANY: 2443 new = DTRACESPEC_DISCARDING; 2444 break; 2445 2446 case DTRACESPEC_ACTIVEONE: 2447 if (buf->dtb_offset != 0) { 2448 new = DTRACESPEC_INACTIVE; 2449 } else { 2450 new = DTRACESPEC_DISCARDING; 2451 } 2452 break; 2453 2454 default: 2455 ASSERT(0); 2456 } 2457 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2458 current, new) != current); 2459 2460 buf->dtb_offset = 0; 2461 buf->dtb_drops = 0; 2462 } 2463 2464 /* 2465 * Note: not called from probe context. This function is called 2466 * asynchronously from cross call context to clean any speculations that are 2467 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2468 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2469 * speculation. 2470 */ 2471 static void 2472 dtrace_speculation_clean_here(dtrace_state_t *state) 2473 { 2474 dtrace_icookie_t cookie; 2475 processorid_t cpu = curcpu; 2476 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2477 dtrace_specid_t i; 2478 2479 cookie = dtrace_interrupt_disable(); 2480 2481 if (dest->dtb_tomax == NULL) { 2482 dtrace_interrupt_enable(cookie); 2483 return; 2484 } 2485 2486 for (i = 0; i < state->dts_nspeculations; i++) { 2487 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2488 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2489 2490 if (src->dtb_tomax == NULL) 2491 continue; 2492 2493 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2494 src->dtb_offset = 0; 2495 continue; 2496 } 2497 2498 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2499 continue; 2500 2501 if (src->dtb_offset == 0) 2502 continue; 2503 2504 dtrace_speculation_commit(state, cpu, i + 1); 2505 } 2506 2507 dtrace_interrupt_enable(cookie); 2508 } 2509 2510 /* 2511 * Note: not called from probe context. This function is called 2512 * asynchronously (and at a regular interval) to clean any speculations that 2513 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there
2514  * is work to be done, it cross calls all CPUs to perform that work;
2515  * COMMITTINGMANY and DISCARDING speculations may not be transitioned back
2516  * to the INACTIVE state until they have been cleaned by all CPUs.
2517  */
2518 static void
2519 dtrace_speculation_clean(dtrace_state_t *state)
2520 {
2521     int work = 0, rv;
2522     dtrace_specid_t i;
2523 
2524     for (i = 0; i < state->dts_nspeculations; i++) {
2525         dtrace_speculation_t *spec = &state->dts_speculations[i];
2526 
2527         ASSERT(!spec->dtsp_cleaning);
2528 
2529         if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2530             spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2531             continue;
2532 
2533         work++;
2534         spec->dtsp_cleaning = 1;
2535     }
2536 
2537     if (!work)
2538         return;
2539 
2540     dtrace_xcall(DTRACE_CPUALL,
2541         (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2542 
2543     /*
2544      * We now know that all CPUs have committed or discarded their
2545      * speculation buffers, as appropriate. We can now set the state
2546      * to inactive.
2547      */
2548     for (i = 0; i < state->dts_nspeculations; i++) {
2549         dtrace_speculation_t *spec = &state->dts_speculations[i];
2550         dtrace_speculation_state_t current, new;
2551 
2552         if (!spec->dtsp_cleaning)
2553             continue;
2554 
2555         current = spec->dtsp_state;
2556         ASSERT(current == DTRACESPEC_DISCARDING ||
2557             current == DTRACESPEC_COMMITTINGMANY);
2558 
2559         new = DTRACESPEC_INACTIVE;
2560 
2561         rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2562         ASSERT(rv == current);
2563         spec->dtsp_cleaning = 0;
2564     }
2565 }
2566 
2567 /*
2568  * Called as part of a speculate() to get the speculative buffer associated
2569  * with a given speculation. Returns NULL if the specified speculation is not
2570  * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2571  * the active CPU is not the specified CPU -- the speculation will be
2572  * atomically transitioned into the ACTIVEMANY state.
2573  */
2574 static dtrace_buffer_t *
2575 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2576     dtrace_specid_t which)
2577 {
2578     dtrace_speculation_t *spec;
2579     dtrace_speculation_state_t current, new = 0;
2580     dtrace_buffer_t *buf;
2581 
2582     if (which == 0)
2583         return (NULL);
2584 
2585     if (which > state->dts_nspeculations) {
2586         cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2587         return (NULL);
2588     }
2589 
2590     spec = &state->dts_speculations[which - 1];
2591     buf = &spec->dtsp_buffer[cpuid];
2592 
2593     do {
2594         current = spec->dtsp_state;
2595 
2596         switch (current) {
2597         case DTRACESPEC_INACTIVE:
2598         case DTRACESPEC_COMMITTINGMANY:
2599         case DTRACESPEC_DISCARDING:
2600             return (NULL);
2601 
2602         case DTRACESPEC_COMMITTING:
2603             ASSERT(buf->dtb_offset == 0);
2604             return (NULL);
2605 
2606         case DTRACESPEC_ACTIVEONE:
2607             /*
2608              * This speculation is currently active on one CPU.
2609              * Check the offset in the buffer; if it's non-zero,
2610              * that CPU must be us (and we leave the state alone).
2611              * If it's zero, assume that we're starting on a new
2612              * CPU -- and change the state to indicate that the
2613              * speculation is active on more than one CPU.
2614              */
2615             if (buf->dtb_offset != 0)
2616                 return (buf);
2617 
2618             new = DTRACESPEC_ACTIVEMANY;
2619             break;
2620 
2621         case DTRACESPEC_ACTIVEMANY:
2622             return (buf);
2623 
2624         case DTRACESPEC_ACTIVE:
2625             new = DTRACESPEC_ACTIVEONE;
2626             break;
2627 
2628         default:
2629             ASSERT(0);
2630         }
2631     } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2632         current, new) != current);
2633 
2634     ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2635     return (buf);
2636 }
2637 
2638 /*
2639  * Return a string. In the event that the user lacks the privilege to access
2640  * arbitrary kernel memory, we copy the string out to scratch memory so that we
2641  * don't fail access checking.
2642  *
2643  * dtrace_dif_variable() uses this routine as a helper for various
2644  * builtin values such as 'execname' and 'probefunc.'
2645  */
2646 uintptr_t
2647 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2648     dtrace_mstate_t *mstate)
2649 {
2650     uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2651     uintptr_t ret;
2652     size_t strsz;
2653 
2654     /*
2655      * The easy case: this probe is allowed to read all of memory, so
2656      * we can just return this as a vanilla pointer.
2657      */
2658     if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2659         return (addr);
2660 
2661     /*
2662      * This is the tougher case: we copy the string in question from
2663      * kernel memory into scratch memory and return it that way: this
2664      * ensures that we won't trip up when access checking tests the
2665      * BYREF return value.
2666      */
2667     strsz = dtrace_strlen((char *)addr, size) + 1;
2668 
2669     if (mstate->dtms_scratch_ptr + strsz >
2670         mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2671         DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2672         return (0);
2673     }
2674 
2675     dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2676         strsz);
2677     ret = mstate->dtms_scratch_ptr;
2678     mstate->dtms_scratch_ptr += strsz;
2679     return (ret);
2680 }
2681 
2682 /*
2683  * Return a string from a memory address which is known to have one or
2684  * more concatenated, individually zero-terminated, sub-strings.
2685  * In the event that the user lacks the privilege to access
2686  * arbitrary kernel memory, we copy the string out to scratch memory so that we
2687  * don't fail access checking.
2688  *
2689  * dtrace_dif_variable() uses this routine as a helper for various
2690  * builtin values such as 'execargs'.
2691  */
2692 static uintptr_t
2693 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2694     dtrace_mstate_t *mstate)
2695 {
2696     char *p;
2697     size_t i;
2698     uintptr_t ret;
2699 
2700     if (mstate->dtms_scratch_ptr + strsz >
2701         mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2702         DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2703         return (0);
2704     }
2705 
2706     dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2707         strsz);
2708 
2709     /* Replace sub-string termination characters with a space. */
2710     for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2711         p++, i++)
2712         if (*p == '\0')
2713             *p = ' ';
2714 
2715     ret = mstate->dtms_scratch_ptr;
2716     mstate->dtms_scratch_ptr += strsz;
2717     return (ret);
2718 }
2719 
2720 /*
2721  * This function implements the DIF emulator's variable lookups. The emulator
2722  * passes a reserved variable identifier and optional built-in array index.
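 *
 * For example (illustrative, not from the original comment), a D
 * reference to arg3 compiles to a load of the reserved identifier
 * DIF_VAR_ARG3; the code below canonicalizes DIF_VAR_ARG0..ARG9 into
 * (DIF_VAR_ARGS, ndx = 3), answering from the cached mstate->dtms_arg[]
 * array when possible and calling into the provider's dtps_getargval()
 * entry point (or dtrace_getarg()) otherwise.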
2723 */ 2724 static uint64_t 2725 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2726 uint64_t ndx) 2727 { 2728 /* 2729 * If we're accessing one of the uncached arguments, we'll turn this 2730 * into a reference in the args array. 2731 */ 2732 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2733 ndx = v - DIF_VAR_ARG0; 2734 v = DIF_VAR_ARGS; 2735 } 2736 2737 switch (v) { 2738 case DIF_VAR_ARGS: 2739 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2740 if (ndx >= sizeof (mstate->dtms_arg) / 2741 sizeof (mstate->dtms_arg[0])) { 2742 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2743 dtrace_provider_t *pv; 2744 uint64_t val; 2745 2746 pv = mstate->dtms_probe->dtpr_provider; 2747 if (pv->dtpv_pops.dtps_getargval != NULL) 2748 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2749 mstate->dtms_probe->dtpr_id, 2750 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2751 else 2752 val = dtrace_getarg(ndx, aframes); 2753 2754 /* 2755 * This is regrettably required to keep the compiler 2756 * from tail-optimizing the call to dtrace_getarg(). 2757 * The condition always evaluates to true, but the 2758 * compiler has no way of figuring that out a priori. 2759 * (None of this would be necessary if the compiler 2760 * could be relied upon to _always_ tail-optimize 2761 * the call to dtrace_getarg() -- but it can't.) 2762 */ 2763 if (mstate->dtms_probe != NULL) 2764 return (val); 2765 2766 ASSERT(0); 2767 } 2768 2769 return (mstate->dtms_arg[ndx]); 2770 2771 #if defined(sun) 2772 case DIF_VAR_UREGS: { 2773 klwp_t *lwp; 2774 2775 if (!dtrace_priv_proc(state)) 2776 return (0); 2777 2778 if ((lwp = curthread->t_lwp) == NULL) { 2779 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2780 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2781 return (0); 2782 } 2783 2784 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2785 return (0); 2786 } 2787 #endif 2788 2789 case DIF_VAR_CURTHREAD: 2790 if (!dtrace_priv_kernel(state)) 2791 return (0); 2792 return ((uint64_t)(uintptr_t)curthread); 2793 2794 case DIF_VAR_TIMESTAMP: 2795 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2796 mstate->dtms_timestamp = dtrace_gethrtime(); 2797 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2798 } 2799 return (mstate->dtms_timestamp); 2800 2801 case DIF_VAR_VTIMESTAMP: 2802 ASSERT(dtrace_vtime_references != 0); 2803 return (curthread->t_dtrace_vtime); 2804 2805 case DIF_VAR_WALLTIMESTAMP: 2806 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2807 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2808 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2809 } 2810 return (mstate->dtms_walltimestamp); 2811 2812 #if defined(sun) 2813 case DIF_VAR_IPL: 2814 if (!dtrace_priv_kernel(state)) 2815 return (0); 2816 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2817 mstate->dtms_ipl = dtrace_getipl(); 2818 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2819 } 2820 return (mstate->dtms_ipl); 2821 #endif 2822 2823 case DIF_VAR_EPID: 2824 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2825 return (mstate->dtms_epid); 2826 2827 case DIF_VAR_ID: 2828 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2829 return (mstate->dtms_probe->dtpr_id); 2830 2831 case DIF_VAR_STACKDEPTH: 2832 if (!dtrace_priv_kernel(state)) 2833 return (0); 2834 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2835 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2836 2837 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2838 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2839 } 2840 return 
(mstate->dtms_stackdepth); 2841 2842 #if defined(sun) 2843 case DIF_VAR_USTACKDEPTH: 2844 if (!dtrace_priv_proc(state)) 2845 return (0); 2846 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2847 /* 2848 * See comment in DIF_VAR_PID. 2849 */ 2850 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2851 CPU_ON_INTR(CPU)) { 2852 mstate->dtms_ustackdepth = 0; 2853 } else { 2854 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2855 mstate->dtms_ustackdepth = 2856 dtrace_getustackdepth(); 2857 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2858 } 2859 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2860 } 2861 return (mstate->dtms_ustackdepth); 2862 #endif 2863 2864 case DIF_VAR_CALLER: 2865 if (!dtrace_priv_kernel(state)) 2866 return (0); 2867 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2868 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2869 2870 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2871 /* 2872 * If this is an unanchored probe, we are 2873 * required to go through the slow path: 2874 * dtrace_caller() only guarantees correct 2875 * results for anchored probes. 2876 */ 2877 pc_t caller[2] = {0, 0}; 2878 2879 dtrace_getpcstack(caller, 2, aframes, 2880 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2881 mstate->dtms_caller = caller[1]; 2882 } else if ((mstate->dtms_caller = 2883 dtrace_caller(aframes)) == -1) { 2884 /* 2885 * We have failed to do this the quick way; 2886 * we must resort to the slower approach of 2887 * calling dtrace_getpcstack(). 2888 */ 2889 pc_t caller = 0; 2890 2891 dtrace_getpcstack(&caller, 1, aframes, NULL); 2892 mstate->dtms_caller = caller; 2893 } 2894 2895 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2896 } 2897 return (mstate->dtms_caller); 2898 2899 #if defined(sun) 2900 case DIF_VAR_UCALLER: 2901 if (!dtrace_priv_proc(state)) 2902 return (0); 2903 2904 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2905 uint64_t ustack[3]; 2906 2907 /* 2908 * dtrace_getupcstack() fills in the first uint64_t 2909 * with the current PID. The second uint64_t will 2910 * be the program counter at user-level. The third 2911 * uint64_t will contain the caller, which is what 2912 * we're after. 2913 */ 2914 ustack[2] = 0; 2915 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2916 dtrace_getupcstack(ustack, 3); 2917 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2918 mstate->dtms_ucaller = ustack[2]; 2919 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2920 } 2921 2922 return (mstate->dtms_ucaller); 2923 #endif 2924 2925 case DIF_VAR_PROBEPROV: 2926 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2927 return (dtrace_dif_varstr( 2928 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2929 state, mstate)); 2930 2931 case DIF_VAR_PROBEMOD: 2932 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2933 return (dtrace_dif_varstr( 2934 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2935 state, mstate)); 2936 2937 case DIF_VAR_PROBEFUNC: 2938 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2939 return (dtrace_dif_varstr( 2940 (uintptr_t)mstate->dtms_probe->dtpr_func, 2941 state, mstate)); 2942 2943 case DIF_VAR_PROBENAME: 2944 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2945 return (dtrace_dif_varstr( 2946 (uintptr_t)mstate->dtms_probe->dtpr_name, 2947 state, mstate)); 2948 2949 case DIF_VAR_PID: 2950 if (!dtrace_priv_proc(state)) 2951 return (0); 2952 2953 #if defined(sun) 2954 /* 2955 * Note that we are assuming that an unanchored probe is 2956 * always due to a high-level interrupt. (And we're assuming 2957 * that there is only a single high level interrupt.) 
2958          */
2959         if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2960             return (pid0.pid_id);
2961 
2962         /*
2963          * It is always safe to dereference one's own t_procp pointer:
2964          * it always points to a valid, allocated proc structure.
2965          * Further, it is always safe to dereference the p_pidp member
2966          * of one's own proc structure. (These are truisms because
2967          * threads and processes don't clean up their own state --
2968          * they leave that task to whomever reaps them.)
2969          */
2970         return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2971 #else
2972         return ((uint64_t)curproc->p_pid);
2973 #endif
2974 
2975     case DIF_VAR_PPID:
2976         if (!dtrace_priv_proc(state))
2977             return (0);
2978 
2979 #if defined(sun)
2980         /*
2981          * See comment in DIF_VAR_PID.
2982          */
2983         if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2984             return (pid0.pid_id);
2985 
2986         /*
2987          * It is always safe to dereference one's own t_procp pointer:
2988          * it always points to a valid, allocated proc structure.
2989          * (This is true because threads don't clean up their own
2990          * state -- they leave that task to whomever reaps them.)
2991          */
2992         return ((uint64_t)curthread->t_procp->p_ppid);
2993 #else
2994         return ((uint64_t)curproc->p_pptr->p_pid);
2995 #endif
2996 
2997     case DIF_VAR_TID:
2998 #if defined(sun)
2999         /*
3000          * See comment in DIF_VAR_PID.
3001          */
3002         if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3003             return (0);
3004 #endif
3005 
3006         return ((uint64_t)curthread->t_tid);
3007 
3008     case DIF_VAR_EXECARGS: {
3009         struct pargs *p_args = curthread->td_proc->p_args;
3010 
3011         if (p_args == NULL)
3012             return (0);
3013 
3014         return (dtrace_dif_varstrz(
3015             (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3016     }
3017 
3018     case DIF_VAR_EXECNAME:
3019 #if defined(sun)
3020         if (!dtrace_priv_proc(state))
3021             return (0);
3022 
3023         /*
3024          * See comment in DIF_VAR_PID.
3025          */
3026         if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3027             return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3028 
3029         /*
3030          * It is always safe to dereference one's own t_procp pointer:
3031          * it always points to a valid, allocated proc structure.
3032          * (This is true because threads don't clean up their own
3033          * state -- they leave that task to whomever reaps them.)
3034          */
3035         return (dtrace_dif_varstr(
3036             (uintptr_t)curthread->t_procp->p_user.u_comm,
3037             state, mstate));
3038 #else
3039         return (dtrace_dif_varstr(
3040             (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3041 #endif
3042 
3043     case DIF_VAR_ZONENAME:
3044 #if defined(sun)
3045         if (!dtrace_priv_proc(state))
3046             return (0);
3047 
3048         /*
3049          * See comment in DIF_VAR_PID.
3050          */
3051         if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3052             return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3053 
3054         /*
3055          * It is always safe to dereference one's own t_procp pointer:
3056          * it always points to a valid, allocated proc structure.
3057          * (This is true because threads don't clean up their own
3058          * state -- they leave that task to whomever reaps them.)
3059          */
3060         return (dtrace_dif_varstr(
3061             (uintptr_t)curthread->t_procp->p_zone->zone_name,
3062             state, mstate));
3063 #else
3064         return (0);
3065 #endif
3066 
3067     case DIF_VAR_UID:
3068         if (!dtrace_priv_proc(state))
3069             return (0);
3070 
3071 #if defined(sun)
3072         /*
3073          * See comment in DIF_VAR_PID.
3074 */ 3075 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3076 return ((uint64_t)p0.p_cred->cr_uid); 3077 #endif 3078 3079 /* 3080 * It is always safe to dereference one's own t_procp pointer: 3081 * it always points to a valid, allocated proc structure. 3082 * (This is true because threads don't clean up their own 3083 * state -- they leave that task to whomever reaps them.) 3084 * 3085 * Additionally, it is safe to dereference one's own process 3086 * credential, since this is never NULL after process birth. 3087 */ 3088 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3089 3090 case DIF_VAR_GID: 3091 if (!dtrace_priv_proc(state)) 3092 return (0); 3093 3094 #if defined(sun) 3095 /* 3096 * See comment in DIF_VAR_PID. 3097 */ 3098 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3099 return ((uint64_t)p0.p_cred->cr_gid); 3100 #endif 3101 3102 /* 3103 * It is always safe to dereference one's own t_procp pointer: 3104 * it always points to a valid, allocated proc structure. 3105 * (This is true because threads don't clean up their own 3106 * state -- they leave that task to whomever reaps them.) 3107 * 3108 * Additionally, it is safe to dereference one's own process 3109 * credential, since this is never NULL after process birth. 3110 */ 3111 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3112 3113 case DIF_VAR_ERRNO: { 3114 #if defined(sun) 3115 klwp_t *lwp; 3116 if (!dtrace_priv_proc(state)) 3117 return (0); 3118 3119 /* 3120 * See comment in DIF_VAR_PID. 3121 */ 3122 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3123 return (0); 3124 3125 /* 3126 * It is always safe to dereference one's own t_lwp pointer in 3127 * the event that this pointer is non-NULL. (This is true 3128 * because threads and lwps don't clean up their own state -- 3129 * they leave that task to whomever reaps them.) 3130 */ 3131 if ((lwp = curthread->t_lwp) == NULL) 3132 return (0); 3133 3134 return ((uint64_t)lwp->lwp_errno); 3135 #else 3136 return (curthread->td_errno); 3137 #endif 3138 } 3139 default: 3140 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3141 return (0); 3142 } 3143 } 3144 3145 /* 3146 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3147 * Notice that we don't bother validating the proper number of arguments or 3148 * their types in the tuple stack. This isn't needed because all argument 3149 * interpretation is safe because of our load safety -- the worst that can 3150 * happen is that a bogus program can obtain bogus results. 
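 *
 * For example (an illustrative mapping, not part of the original
 * comment), the D expression strlen(execname) compiles to a DIF call
 * with subr == DIF_SUBR_STRLEN and the string's address in
 * tupregs[0].dttk_value; the result is deposited in regs[rd]. Each
 * case below follows this same convention: arguments arrive in
 * tupregs[0..nargs-1] and the return value leaves through regs[rd].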
3151 */ 3152 static void 3153 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3154 dtrace_key_t *tupregs, int nargs, 3155 dtrace_mstate_t *mstate, dtrace_state_t *state) 3156 { 3157 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3158 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3159 dtrace_vstate_t *vstate = &state->dts_vstate; 3160 3161 #if defined(sun) 3162 union { 3163 mutex_impl_t mi; 3164 uint64_t mx; 3165 } m; 3166 3167 union { 3168 krwlock_t ri; 3169 uintptr_t rw; 3170 } r; 3171 #else 3172 struct thread *lowner; 3173 union { 3174 struct lock_object *li; 3175 uintptr_t lx; 3176 } l; 3177 #endif 3178 3179 switch (subr) { 3180 case DIF_SUBR_RAND: 3181 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3182 break; 3183 3184 #if defined(sun) 3185 case DIF_SUBR_MUTEX_OWNED: 3186 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3187 mstate, vstate)) { 3188 regs[rd] = 0; 3189 break; 3190 } 3191 3192 m.mx = dtrace_load64(tupregs[0].dttk_value); 3193 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3194 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3195 else 3196 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3197 break; 3198 3199 case DIF_SUBR_MUTEX_OWNER: 3200 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3201 mstate, vstate)) { 3202 regs[rd] = 0; 3203 break; 3204 } 3205 3206 m.mx = dtrace_load64(tupregs[0].dttk_value); 3207 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3208 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3209 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3210 else 3211 regs[rd] = 0; 3212 break; 3213 3214 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3215 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3216 mstate, vstate)) { 3217 regs[rd] = 0; 3218 break; 3219 } 3220 3221 m.mx = dtrace_load64(tupregs[0].dttk_value); 3222 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3223 break; 3224 3225 case DIF_SUBR_MUTEX_TYPE_SPIN: 3226 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3227 mstate, vstate)) { 3228 regs[rd] = 0; 3229 break; 3230 } 3231 3232 m.mx = dtrace_load64(tupregs[0].dttk_value); 3233 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3234 break; 3235 3236 case DIF_SUBR_RW_READ_HELD: { 3237 uintptr_t tmp; 3238 3239 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3240 mstate, vstate)) { 3241 regs[rd] = 0; 3242 break; 3243 } 3244 3245 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3246 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3247 break; 3248 } 3249 3250 case DIF_SUBR_RW_WRITE_HELD: 3251 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3252 mstate, vstate)) { 3253 regs[rd] = 0; 3254 break; 3255 } 3256 3257 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3258 regs[rd] = _RW_WRITE_HELD(&r.ri); 3259 break; 3260 3261 case DIF_SUBR_RW_ISWRITER: 3262 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3263 mstate, vstate)) { 3264 regs[rd] = 0; 3265 break; 3266 } 3267 3268 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3269 regs[rd] = _RW_ISWRITER(&r.ri); 3270 break; 3271 3272 #else 3273 case DIF_SUBR_MUTEX_OWNED: 3274 if (!dtrace_canload(tupregs[0].dttk_value, 3275 sizeof (struct lock_object), mstate, vstate)) { 3276 regs[rd] = 0; 3277 break; 3278 } 3279 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3280 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3281 break; 3282 3283 case DIF_SUBR_MUTEX_OWNER: 3284 if (!dtrace_canload(tupregs[0].dttk_value, 3285 sizeof (struct lock_object), mstate, vstate)) { 3286 regs[rd] = 0; 3287 break; 3288 } 3289 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3290 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3291 regs[rd] = (uintptr_t)lowner; 3292 break; 3293 3294 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3295 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3296 mstate, vstate)) { 3297 regs[rd] = 0; 3298 break; 3299 } 3300 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3301 /* XXX - should be only LC_SLEEPABLE? */ 3302 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3303 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3304 break; 3305 3306 case DIF_SUBR_MUTEX_TYPE_SPIN: 3307 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3308 mstate, vstate)) { 3309 regs[rd] = 0; 3310 break; 3311 } 3312 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3313 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3314 break; 3315 3316 case DIF_SUBR_RW_READ_HELD: 3317 case DIF_SUBR_SX_SHARED_HELD: 3318 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3319 mstate, vstate)) { 3320 regs[rd] = 0; 3321 break; 3322 } 3323 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3324 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3325 lowner == NULL; 3326 break; 3327 3328 case DIF_SUBR_RW_WRITE_HELD: 3329 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3330 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3331 mstate, vstate)) { 3332 regs[rd] = 0; 3333 break; 3334 } 3335 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3336 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3337 regs[rd] = (lowner == curthread); 3338 break; 3339 3340 case DIF_SUBR_RW_ISWRITER: 3341 case DIF_SUBR_SX_ISEXCLUSIVE: 3342 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3343 mstate, vstate)) { 3344 regs[rd] = 0; 3345 break; 3346 } 3347 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3348 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3349 lowner != NULL; 3350 break; 3351 #endif /* ! defined(sun) */ 3352 3353 case DIF_SUBR_BCOPY: { 3354 /* 3355 * We need to be sure that the destination is in the scratch 3356 * region -- no other region is allowed. 3357 */ 3358 uintptr_t src = tupregs[0].dttk_value; 3359 uintptr_t dest = tupregs[1].dttk_value; 3360 size_t size = tupregs[2].dttk_value; 3361 3362 if (!dtrace_inscratch(dest, size, mstate)) { 3363 *flags |= CPU_DTRACE_BADADDR; 3364 *illval = regs[rd]; 3365 break; 3366 } 3367 3368 if (!dtrace_canload(src, size, mstate, vstate)) { 3369 regs[rd] = 0; 3370 break; 3371 } 3372 3373 dtrace_bcopy((void *)src, (void *)dest, size); 3374 break; 3375 } 3376 3377 case DIF_SUBR_ALLOCA: 3378 case DIF_SUBR_COPYIN: { 3379 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3380 uint64_t size = 3381 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3382 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3383 3384 /* 3385 * This action doesn't require any credential checks since 3386 * probes will not activate in user contexts to which the 3387 * enabling user does not have permissions. 3388 */ 3389 3390 /* 3391 * Rounding up the user allocation size could have overflowed 3392 * a large, bogus allocation (like -1ULL) to 0. 
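         *
         * (Concretely -- an illustrative case, not from the original
         * text -- with size == -1ULL, scratch_size computed as
         * (dest - dtms_scratch_ptr) + size wraps around to a small
         * value; the scratch_size < size comparison below catches
         * exactly this wrap, since an unwrapped scratch_size can never
         * be smaller than the size it includes.)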
3393 */ 3394 if (scratch_size < size || 3395 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3396 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3397 regs[rd] = 0; 3398 break; 3399 } 3400 3401 if (subr == DIF_SUBR_COPYIN) { 3402 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3403 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3404 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3405 } 3406 3407 mstate->dtms_scratch_ptr += scratch_size; 3408 regs[rd] = dest; 3409 break; 3410 } 3411 3412 case DIF_SUBR_COPYINTO: { 3413 uint64_t size = tupregs[1].dttk_value; 3414 uintptr_t dest = tupregs[2].dttk_value; 3415 3416 /* 3417 * This action doesn't require any credential checks since 3418 * probes will not activate in user contexts to which the 3419 * enabling user does not have permissions. 3420 */ 3421 if (!dtrace_inscratch(dest, size, mstate)) { 3422 *flags |= CPU_DTRACE_BADADDR; 3423 *illval = regs[rd]; 3424 break; 3425 } 3426 3427 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3428 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3429 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3430 break; 3431 } 3432 3433 case DIF_SUBR_COPYINSTR: { 3434 uintptr_t dest = mstate->dtms_scratch_ptr; 3435 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3436 3437 if (nargs > 1 && tupregs[1].dttk_value < size) 3438 size = tupregs[1].dttk_value + 1; 3439 3440 /* 3441 * This action doesn't require any credential checks since 3442 * probes will not activate in user contexts to which the 3443 * enabling user does not have permissions. 3444 */ 3445 if (!DTRACE_INSCRATCH(mstate, size)) { 3446 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3447 regs[rd] = 0; 3448 break; 3449 } 3450 3451 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3452 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3453 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3454 3455 ((char *)dest)[size - 1] = '\0'; 3456 mstate->dtms_scratch_ptr += size; 3457 regs[rd] = dest; 3458 break; 3459 } 3460 3461 #if defined(sun) 3462 case DIF_SUBR_MSGSIZE: 3463 case DIF_SUBR_MSGDSIZE: { 3464 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3465 uintptr_t wptr, rptr; 3466 size_t count = 0; 3467 int cont = 0; 3468 3469 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3470 3471 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3472 vstate)) { 3473 regs[rd] = 0; 3474 break; 3475 } 3476 3477 wptr = dtrace_loadptr(baddr + 3478 offsetof(mblk_t, b_wptr)); 3479 3480 rptr = dtrace_loadptr(baddr + 3481 offsetof(mblk_t, b_rptr)); 3482 3483 if (wptr < rptr) { 3484 *flags |= CPU_DTRACE_BADADDR; 3485 *illval = tupregs[0].dttk_value; 3486 break; 3487 } 3488 3489 daddr = dtrace_loadptr(baddr + 3490 offsetof(mblk_t, b_datap)); 3491 3492 baddr = dtrace_loadptr(baddr + 3493 offsetof(mblk_t, b_cont)); 3494 3495 /* 3496 * We want to prevent against denial-of-service here, 3497 * so we're only going to search the list for 3498 * dtrace_msgdsize_max mblks. 
3499 */ 3500 if (cont++ > dtrace_msgdsize_max) { 3501 *flags |= CPU_DTRACE_ILLOP; 3502 break; 3503 } 3504 3505 if (subr == DIF_SUBR_MSGDSIZE) { 3506 if (dtrace_load8(daddr + 3507 offsetof(dblk_t, db_type)) != M_DATA) 3508 continue; 3509 } 3510 3511 count += wptr - rptr; 3512 } 3513 3514 if (!(*flags & CPU_DTRACE_FAULT)) 3515 regs[rd] = count; 3516 3517 break; 3518 } 3519 #endif 3520 3521 case DIF_SUBR_PROGENYOF: { 3522 pid_t pid = tupregs[0].dttk_value; 3523 proc_t *p; 3524 int rval = 0; 3525 3526 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3527 3528 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3529 #if defined(sun) 3530 if (p->p_pidp->pid_id == pid) { 3531 #else 3532 if (p->p_pid == pid) { 3533 #endif 3534 rval = 1; 3535 break; 3536 } 3537 } 3538 3539 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3540 3541 regs[rd] = rval; 3542 break; 3543 } 3544 3545 case DIF_SUBR_SPECULATION: 3546 regs[rd] = dtrace_speculation(state); 3547 break; 3548 3549 case DIF_SUBR_COPYOUT: { 3550 uintptr_t kaddr = tupregs[0].dttk_value; 3551 uintptr_t uaddr = tupregs[1].dttk_value; 3552 uint64_t size = tupregs[2].dttk_value; 3553 3554 if (!dtrace_destructive_disallow && 3555 dtrace_priv_proc_control(state) && 3556 !dtrace_istoxic(kaddr, size)) { 3557 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3558 dtrace_copyout(kaddr, uaddr, size, flags); 3559 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3560 } 3561 break; 3562 } 3563 3564 case DIF_SUBR_COPYOUTSTR: { 3565 uintptr_t kaddr = tupregs[0].dttk_value; 3566 uintptr_t uaddr = tupregs[1].dttk_value; 3567 uint64_t size = tupregs[2].dttk_value; 3568 3569 if (!dtrace_destructive_disallow && 3570 dtrace_priv_proc_control(state) && 3571 !dtrace_istoxic(kaddr, size)) { 3572 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3573 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3574 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3575 } 3576 break; 3577 } 3578 3579 case DIF_SUBR_STRLEN: { 3580 size_t sz; 3581 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3582 sz = dtrace_strlen((char *)addr, 3583 state->dts_options[DTRACEOPT_STRSIZE]); 3584 3585 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3586 regs[rd] = 0; 3587 break; 3588 } 3589 3590 regs[rd] = sz; 3591 3592 break; 3593 } 3594 3595 case DIF_SUBR_STRCHR: 3596 case DIF_SUBR_STRRCHR: { 3597 /* 3598 * We're going to iterate over the string looking for the 3599 * specified character. We will iterate until we have reached 3600 * the string length or we have found the character. If this 3601 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3602 * of the specified character instead of the first. 3603 */ 3604 uintptr_t saddr = tupregs[0].dttk_value; 3605 uintptr_t addr = tupregs[0].dttk_value; 3606 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3607 char c, target = (char)tupregs[1].dttk_value; 3608 3609 for (regs[rd] = 0; addr < limit; addr++) { 3610 if ((c = dtrace_load8(addr)) == target) { 3611 regs[rd] = addr; 3612 3613 if (subr == DIF_SUBR_STRCHR) 3614 break; 3615 } 3616 3617 if (c == '\0') 3618 break; 3619 } 3620 3621 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3622 regs[rd] = 0; 3623 break; 3624 } 3625 3626 break; 3627 } 3628 3629 case DIF_SUBR_STRSTR: 3630 case DIF_SUBR_INDEX: 3631 case DIF_SUBR_RINDEX: { 3632 /* 3633 * We're going to iterate over the string looking for the 3634 * specified string. We will iterate until we have reached 3635 * the string length or we have found the string. 
(Yes, this 3636 * is done in the most naive way possible -- but considering 3637 * that the string we're searching for is likely to be 3638 * relatively short, the complexity of Rabin-Karp or similar 3639 * hardly seems merited.) 3640 */ 3641 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3642 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3643 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3644 size_t len = dtrace_strlen(addr, size); 3645 size_t sublen = dtrace_strlen(substr, size); 3646 char *limit = addr + len, *orig = addr; 3647 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3648 int inc = 1; 3649 3650 regs[rd] = notfound; 3651 3652 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3653 regs[rd] = 0; 3654 break; 3655 } 3656 3657 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3658 vstate)) { 3659 regs[rd] = 0; 3660 break; 3661 } 3662 3663 /* 3664 * strstr() and index()/rindex() have similar semantics if 3665 * both strings are the empty string: strstr() returns a 3666 * pointer to the (empty) string, and index() and rindex() 3667 * both return index 0 (regardless of any position argument). 3668 */ 3669 if (sublen == 0 && len == 0) { 3670 if (subr == DIF_SUBR_STRSTR) 3671 regs[rd] = (uintptr_t)addr; 3672 else 3673 regs[rd] = 0; 3674 break; 3675 } 3676 3677 if (subr != DIF_SUBR_STRSTR) { 3678 if (subr == DIF_SUBR_RINDEX) { 3679 limit = orig - 1; 3680 addr += len; 3681 inc = -1; 3682 } 3683 3684 /* 3685 * Both index() and rindex() take an optional position 3686 * argument that denotes the starting position. 3687 */ 3688 if (nargs == 3) { 3689 int64_t pos = (int64_t)tupregs[2].dttk_value; 3690 3691 /* 3692 * If the position argument to index() is 3693 * negative, Perl implicitly clamps it at 3694 * zero. This semantic is a little surprising 3695 * given the special meaning of negative 3696 * positions to similar Perl functions like 3697 * substr(), but it appears to reflect a 3698 * notion that index() can start from a 3699 * negative index and increment its way up to 3700 * the string. Given this notion, Perl's 3701 * rindex() is at least self-consistent in 3702 * that it implicitly clamps positions greater 3703 * than the string length to be the string 3704 * length. Where Perl completely loses 3705 * coherence, however, is when the specified 3706 * substring is the empty string (""). In 3707 * this case, even if the position is 3708 * negative, rindex() returns 0 -- and even if 3709 * the position is greater than the length, 3710 * index() returns the string length. These 3711 * semantics violate the notion that index() 3712 * should never return a value less than the 3713 * specified position and that rindex() should 3714 * never return a value greater than the 3715 * specified position. (One assumes that 3716 * these semantics are artifacts of Perl's 3717 * implementation and not the results of 3718 * deliberate design -- it beggars belief that 3719 * even Larry Wall could desire such oddness.) 3720 * While in the abstract one would wish for 3721 * consistent position semantics across 3722 * substr(), index() and rindex() -- or at the 3723 * very least self-consistent position 3724 * semantics for index() and rindex() -- we 3725 * instead opt to keep with the extant Perl 3726 * semantics, in all their broken glory. (Do 3727 * we have more desire to maintain Perl's 3728 * semantics than Perl does? Probably.) 
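                 *
                 * To make the adopted semantics concrete (illustrative
                 * values, not from the original comment), with
                 * s = "foobarbaz":
                 *
                 *	index(s, "bar")     => 3
                 *	index(s, "bar", 4)  => -1  (search starts past it)
                 *	rindex(s, "ba")     => 6
                 *	rindex(s, "ba", 5)  => 3   (position clamps search)
                 *	index(s, "", 100)   => 9   (empty substring: length)
                 *	rindex(s, "", -5)   => 0   (empty substring: zero)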
3729 */ 3730 if (subr == DIF_SUBR_RINDEX) { 3731 if (pos < 0) { 3732 if (sublen == 0) 3733 regs[rd] = 0; 3734 break; 3735 } 3736 3737 if (pos > len) 3738 pos = len; 3739 } else { 3740 if (pos < 0) 3741 pos = 0; 3742 3743 if (pos >= len) { 3744 if (sublen == 0) 3745 regs[rd] = len; 3746 break; 3747 } 3748 } 3749 3750 addr = orig + pos; 3751 } 3752 } 3753 3754 for (regs[rd] = notfound; addr != limit; addr += inc) { 3755 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3756 if (subr != DIF_SUBR_STRSTR) { 3757 /* 3758 * As D index() and rindex() are 3759 * modeled on Perl (and not on awk), 3760 * we return a zero-based (and not a 3761 * one-based) index. (For you Perl 3762 * weenies: no, we're not going to add 3763 * $[ -- and shouldn't you be at a con 3764 * or something?) 3765 */ 3766 regs[rd] = (uintptr_t)(addr - orig); 3767 break; 3768 } 3769 3770 ASSERT(subr == DIF_SUBR_STRSTR); 3771 regs[rd] = (uintptr_t)addr; 3772 break; 3773 } 3774 } 3775 3776 break; 3777 } 3778 3779 case DIF_SUBR_STRTOK: { 3780 uintptr_t addr = tupregs[0].dttk_value; 3781 uintptr_t tokaddr = tupregs[1].dttk_value; 3782 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3783 uintptr_t limit, toklimit = tokaddr + size; 3784 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3785 char *dest = (char *)mstate->dtms_scratch_ptr; 3786 int i; 3787 3788 /* 3789 * Check both the token buffer and (later) the input buffer, 3790 * since both could be non-scratch addresses. 3791 */ 3792 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3793 regs[rd] = 0; 3794 break; 3795 } 3796 3797 if (!DTRACE_INSCRATCH(mstate, size)) { 3798 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3799 regs[rd] = 0; 3800 break; 3801 } 3802 3803 if (addr == 0) { 3804 /* 3805 * If the address specified is NULL, we use our saved 3806 * strtok pointer from the mstate. Note that this 3807 * means that the saved strtok pointer is _only_ 3808 * valid within multiple enablings of the same probe -- 3809 * it behaves like an implicit clause-local variable. 3810 */ 3811 addr = mstate->dtms_strtok; 3812 } else { 3813 /* 3814 * If the user-specified address is non-NULL we must 3815 * access check it. This is the only time we have 3816 * a chance to do so, since this address may reside 3817 * in the string table of this clause -- future calls 3818 * (when we fetch addr from mstate->dtms_strtok) 3819 * would fail this access check. 3820 */ 3821 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3822 regs[rd] = 0; 3823 break; 3824 } 3825 } 3826 3827 /* 3828 * First, zero the token map, and then process the token 3829 * string -- setting a bit in the map for every character 3830 * found in the token string. 3831 */ 3832 for (i = 0; i < sizeof (tokmap); i++) 3833 tokmap[i] = 0; 3834 3835 for (; tokaddr < toklimit; tokaddr++) { 3836 if ((c = dtrace_load8(tokaddr)) == '\0') 3837 break; 3838 3839 ASSERT((c >> 3) < sizeof (tokmap)); 3840 tokmap[c >> 3] |= (1 << (c & 0x7)); 3841 } 3842 3843 for (limit = addr + size; addr < limit; addr++) { 3844 /* 3845 * We're looking for a character that is _not_ contained 3846 * in the token string. 3847 */ 3848 if ((c = dtrace_load8(addr)) == '\0') 3849 break; 3850 3851 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3852 break; 3853 } 3854 3855 if (c == '\0') { 3856 /* 3857 * We reached the end of the string without finding 3858 * any character that was not in the token string. 3859 * We return NULL in this case, and we set the saved 3860 * address to NULL as well.
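 *
 * (Consumer-side, this mirrors the usual D idiom -- a sketch, with
 * hypothetical variable names:
 *
 *	this->tok = strtok(this->str, ":");
 *	this->tok = strtok(NULL, ":");
 *
 * where every call after the first passes NULL within the same
 * clause and picks up from the saved dtms_strtok address; a NULL
 * return here means that the input has been exhausted.)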
3861 */ 3862 regs[rd] = 0; 3863 mstate->dtms_strtok = 0; 3864 break; 3865 } 3866 3867 /* 3868 * From here on, we're copying into the destination string. 3869 */ 3870 for (i = 0; addr < limit && i < size - 1; addr++) { 3871 if ((c = dtrace_load8(addr)) == '\0') 3872 break; 3873 3874 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3875 break; 3876 3877 ASSERT(i < size); 3878 dest[i++] = c; 3879 } 3880 3881 ASSERT(i < size); 3882 dest[i] = '\0'; 3883 regs[rd] = (uintptr_t)dest; 3884 mstate->dtms_scratch_ptr += size; 3885 mstate->dtms_strtok = addr; 3886 break; 3887 } 3888 3889 case DIF_SUBR_SUBSTR: { 3890 uintptr_t s = tupregs[0].dttk_value; 3891 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3892 char *d = (char *)mstate->dtms_scratch_ptr; 3893 int64_t index = (int64_t)tupregs[1].dttk_value; 3894 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3895 size_t len = dtrace_strlen((char *)s, size); 3896 int64_t i = 0; 3897 3898 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3899 regs[rd] = 0; 3900 break; 3901 } 3902 3903 if (!DTRACE_INSCRATCH(mstate, size)) { 3904 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3905 regs[rd] = 0; 3906 break; 3907 } 3908 3909 if (nargs <= 2) 3910 remaining = (int64_t)size; 3911 3912 if (index < 0) { 3913 index += len; 3914 3915 if (index < 0 && index + remaining > 0) { 3916 remaining += index; 3917 index = 0; 3918 } 3919 } 3920 3921 if (index >= len || index < 0) { 3922 remaining = 0; 3923 } else if (remaining < 0) { 3924 remaining += len - index; 3925 } else if (index + remaining > size) { 3926 remaining = size - index; 3927 } 3928 3929 for (i = 0; i < remaining; i++) { 3930 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3931 break; 3932 } 3933 3934 d[i] = '\0'; 3935 3936 mstate->dtms_scratch_ptr += size; 3937 regs[rd] = (uintptr_t)d; 3938 break; 3939 } 3940 3941 #if defined(sun) 3942 case DIF_SUBR_GETMAJOR: 3943 #ifdef _LP64 3944 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3945 #else 3946 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3947 #endif 3948 break; 3949 3950 case DIF_SUBR_GETMINOR: 3951 #ifdef _LP64 3952 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3953 #else 3954 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3955 #endif 3956 break; 3957 3958 case DIF_SUBR_DDI_PATHNAME: { 3959 /* 3960 * This one is a galactic mess. We are going to roughly 3961 * emulate ddi_pathname(), but it's made more complicated 3962 * by the fact that we (a) want to include the minor name and 3963 * (b) must proceed iteratively instead of recursively. 3964 */ 3965 uintptr_t dest = mstate->dtms_scratch_ptr; 3966 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3967 char *start = (char *)dest, *end = start + size - 1; 3968 uintptr_t daddr = tupregs[0].dttk_value; 3969 int64_t minor = (int64_t)tupregs[1].dttk_value; 3970 char *s; 3971 int i, len, depth = 0; 3972 3973 /* 3974 * Due to all the pointer jumping we do and context we must 3975 * rely upon, we just mandate that the user must have kernel 3976 * read privileges to use this routine. 3977 */ 3978 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3979 *flags |= CPU_DTRACE_KPRIV; 3980 *illval = daddr; 3981 regs[rd] = 0; 3982 } 3983 3984 if (!DTRACE_INSCRATCH(mstate, size)) { 3985 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3986 regs[rd] = 0; 3987 break; 3988 } 3989 3990 *end = '\0'; 3991 3992 /* 3993 * We want to have a name for the minor. In order to do this, 3994 * we need to walk the minor list from the devinfo. 
We want 3995 * to be sure that we don't infinitely walk a circular list, 3996 * so we check for circularity by sending a scout pointer 3997 * ahead two elements for every element that we iterate over; 3998 * if the list is circular, these will ultimately point to the 3999 * same element. You may recognize this little trick as the 4000 * answer to a stupid interview question -- one that always 4001 * seems to be asked by those who had to have it laboriously 4002 * explained to them, and who can't even concisely describe 4003 * the conditions under which one would be forced to resort to 4004 * this technique. Needless to say, those conditions are 4005 * found here -- and probably only here. Is this the only use 4006 * of this infamous trick in shipping, production code? If it 4007 * isn't, it probably should be... 4008 */ 4009 if (minor != -1) { 4010 uintptr_t maddr = dtrace_loadptr(daddr + 4011 offsetof(struct dev_info, devi_minor)); 4012 4013 uintptr_t next = offsetof(struct ddi_minor_data, next); 4014 uintptr_t name = offsetof(struct ddi_minor_data, 4015 d_minor) + offsetof(struct ddi_minor, name); 4016 uintptr_t dev = offsetof(struct ddi_minor_data, 4017 d_minor) + offsetof(struct ddi_minor, dev); 4018 uintptr_t scout; 4019 4020 if (maddr != NULL) 4021 scout = dtrace_loadptr(maddr + next); 4022 4023 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4024 uint64_t m; 4025 #ifdef _LP64 4026 m = dtrace_load64(maddr + dev) & MAXMIN64; 4027 #else 4028 m = dtrace_load32(maddr + dev) & MAXMIN; 4029 #endif 4030 if (m != minor) { 4031 maddr = dtrace_loadptr(maddr + next); 4032 4033 if (scout == NULL) 4034 continue; 4035 4036 scout = dtrace_loadptr(scout + next); 4037 4038 if (scout == NULL) 4039 continue; 4040 4041 scout = dtrace_loadptr(scout + next); 4042 4043 if (scout == NULL) 4044 continue; 4045 4046 if (scout == maddr) { 4047 *flags |= CPU_DTRACE_ILLOP; 4048 break; 4049 } 4050 4051 continue; 4052 } 4053 4054 /* 4055 * We have the minor data. Now we need to 4056 * copy the minor's name into the end of the 4057 * pathname. 4058 */ 4059 s = (char *)dtrace_loadptr(maddr + name); 4060 len = dtrace_strlen(s, size); 4061 4062 if (*flags & CPU_DTRACE_FAULT) 4063 break; 4064 4065 if (len != 0) { 4066 if ((end -= (len + 1)) < start) 4067 break; 4068 4069 *end = ':'; 4070 } 4071 4072 for (i = 1; i <= len; i++) 4073 end[i] = dtrace_load8((uintptr_t)s++); 4074 break; 4075 } 4076 } 4077 4078 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4079 ddi_node_state_t devi_state; 4080 4081 devi_state = dtrace_load32(daddr + 4082 offsetof(struct dev_info, devi_node_state)); 4083 4084 if (*flags & CPU_DTRACE_FAULT) 4085 break; 4086 4087 if (devi_state >= DS_INITIALIZED) { 4088 s = (char *)dtrace_loadptr(daddr + 4089 offsetof(struct dev_info, devi_addr)); 4090 len = dtrace_strlen(s, size); 4091 4092 if (*flags & CPU_DTRACE_FAULT) 4093 break; 4094 4095 if (len != 0) { 4096 if ((end -= (len + 1)) < start) 4097 break; 4098 4099 *end = '@'; 4100 } 4101 4102 for (i = 1; i <= len; i++) 4103 end[i] = dtrace_load8((uintptr_t)s++); 4104 } 4105 4106 /* 4107 * Now for the node name... 4108 */ 4109 s = (char *)dtrace_loadptr(daddr + 4110 offsetof(struct dev_info, devi_node_name)); 4111 4112 daddr = dtrace_loadptr(daddr + 4113 offsetof(struct dev_info, devi_parent)); 4114 4115 /* 4116 * If our parent is NULL (that is, if we're the root 4117 * node), we're going to use the special path 4118 * "devices". 
4119 */ 4120 if (daddr == 0) 4121 s = "devices"; 4122 4123 len = dtrace_strlen(s, size); 4124 if (*flags & CPU_DTRACE_FAULT) 4125 break; 4126 4127 if ((end -= (len + 1)) < start) 4128 break; 4129 4130 for (i = 1; i <= len; i++) 4131 end[i] = dtrace_load8((uintptr_t)s++); 4132 *end = '/'; 4133 4134 if (depth++ > dtrace_devdepth_max) { 4135 *flags |= CPU_DTRACE_ILLOP; 4136 break; 4137 } 4138 } 4139 4140 if (end < start) 4141 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4142 4143 if (daddr == 0) { 4144 regs[rd] = (uintptr_t)end; 4145 mstate->dtms_scratch_ptr += size; 4146 } 4147 4148 break; 4149 } 4150 #endif 4151 4152 case DIF_SUBR_STRJOIN: { 4153 char *d = (char *)mstate->dtms_scratch_ptr; 4154 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4155 uintptr_t s1 = tupregs[0].dttk_value; 4156 uintptr_t s2 = tupregs[1].dttk_value; 4157 int i = 0; 4158 4159 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4160 !dtrace_strcanload(s2, size, mstate, vstate)) { 4161 regs[rd] = 0; 4162 break; 4163 } 4164 4165 if (!DTRACE_INSCRATCH(mstate, size)) { 4166 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4167 regs[rd] = 0; 4168 break; 4169 } 4170 4171 for (;;) { 4172 if (i >= size) { 4173 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4174 regs[rd] = 0; 4175 break; 4176 } 4177 4178 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4179 i--; 4180 break; 4181 } 4182 } 4183 4184 for (;;) { 4185 if (i >= size) { 4186 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4187 regs[rd] = 0; 4188 break; 4189 } 4190 4191 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4192 break; 4193 } 4194 4195 if (i < size) { 4196 mstate->dtms_scratch_ptr += i; 4197 regs[rd] = (uintptr_t)d; 4198 } 4199 4200 break; 4201 } 4202 4203 case DIF_SUBR_LLTOSTR: { 4204 int64_t i = (int64_t)tupregs[0].dttk_value; 4205 int64_t val = i < 0 ? 
i * -1 : i; 4206 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4207 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4208 4209 if (!DTRACE_INSCRATCH(mstate, size)) { 4210 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4211 regs[rd] = 0; 4212 break; 4213 } 4214 4215 for (*end-- = '\0'; val; val /= 10) 4216 *end-- = '0' + (val % 10); 4217 4218 if (i == 0) 4219 *end-- = '0'; 4220 4221 if (i < 0) 4222 *end-- = '-'; 4223 4224 regs[rd] = (uintptr_t)end + 1; 4225 mstate->dtms_scratch_ptr += size; 4226 break; 4227 } 4228 4229 case DIF_SUBR_HTONS: 4230 case DIF_SUBR_NTOHS: 4231 #if BYTE_ORDER == BIG_ENDIAN 4232 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4233 #else 4234 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4235 #endif 4236 break; 4237 4238 4239 case DIF_SUBR_HTONL: 4240 case DIF_SUBR_NTOHL: 4241 #if BYTE_ORDER == BIG_ENDIAN 4242 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4243 #else 4244 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4245 #endif 4246 break; 4247 4248 4249 case DIF_SUBR_HTONLL: 4250 case DIF_SUBR_NTOHLL: 4251 #if BYTE_ORDER == BIG_ENDIAN 4252 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4253 #else 4254 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4255 #endif 4256 break; 4257 4258 4259 case DIF_SUBR_DIRNAME: 4260 case DIF_SUBR_BASENAME: { 4261 char *dest = (char *)mstate->dtms_scratch_ptr; 4262 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4263 uintptr_t src = tupregs[0].dttk_value; 4264 int i, j, len = dtrace_strlen((char *)src, size); 4265 int lastbase = -1, firstbase = -1, lastdir = -1; 4266 int start, end; 4267 4268 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4269 regs[rd] = 0; 4270 break; 4271 } 4272 4273 if (!DTRACE_INSCRATCH(mstate, size)) { 4274 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4275 regs[rd] = 0; 4276 break; 4277 } 4278 4279 /* 4280 * The basename and dirname for a zero-length string are 4281 * defined to be "." 4282 */ 4283 if (len == 0) { 4284 len = 1; 4285 src = (uintptr_t)"."; 4286 } 4287 4288 /* 4289 * Start from the back of the string, moving back toward the 4290 * front until we see a character that isn't a slash. That 4291 * character is the last character in the basename. 4292 */ 4293 for (i = len - 1; i >= 0; i--) { 4294 if (dtrace_load8(src + i) != '/') 4295 break; 4296 } 4297 4298 if (i >= 0) 4299 lastbase = i; 4300 4301 /* 4302 * Starting from the last character in the basename, move 4303 * towards the front until we find a slash. The character 4304 * that we processed immediately before that is the first 4305 * character in the basename. 4306 */ 4307 for (; i >= 0; i--) { 4308 if (dtrace_load8(src + i) == '/') 4309 break; 4310 } 4311 4312 if (i >= 0) 4313 firstbase = i + 1; 4314 4315 /* 4316 * Now keep going until we find a non-slash character. That 4317 * character is the last character in the dirname. 4318 */ 4319 for (; i >= 0; i--) { 4320 if (dtrace_load8(src + i) != '/') 4321 break; 4322 } 4323 4324 if (i >= 0) 4325 lastdir = i; 4326 4327 ASSERT(!(lastbase == -1 && firstbase != -1)); 4328 ASSERT(!(firstbase == -1 && lastdir != -1)); 4329 4330 if (lastbase == -1) { 4331 /* 4332 * We didn't find a non-slash character. We know that 4333 * the length is non-zero, so the whole string must be 4334 * slashes. In either the dirname or the basename 4335 * case, we return '/'. 4336 */ 4337 ASSERT(firstbase == -1); 4338 firstbase = lastbase = lastdir = 0; 4339 } 4340 4341 if (firstbase == -1) { 4342 /* 4343 * The entire string consists only of a basename 4344 * component.
If we're looking for dirname, we need 4345 * to change our string to be just "."; if we're 4346 * looking for a basename, we'll just set the first 4347 * character of the basename to be 0. 4348 */ 4349 if (subr == DIF_SUBR_DIRNAME) { 4350 ASSERT(lastdir == -1); 4351 src = (uintptr_t)"."; 4352 lastdir = 0; 4353 } else { 4354 firstbase = 0; 4355 } 4356 } 4357 4358 if (subr == DIF_SUBR_DIRNAME) { 4359 if (lastdir == -1) { 4360 /* 4361 * We know that we have a slash in the name -- 4362 * or lastdir would be set to 0, above. And 4363 * because lastdir is -1, we know that this 4364 * slash must be the first character. (That 4365 * is, the full string must be of the form 4366 * "/basename".) In this case, the last 4367 * character of the directory name is 0. 4368 */ 4369 lastdir = 0; 4370 } 4371 4372 start = 0; 4373 end = lastdir; 4374 } else { 4375 ASSERT(subr == DIF_SUBR_BASENAME); 4376 ASSERT(firstbase != -1 && lastbase != -1); 4377 start = firstbase; 4378 end = lastbase; 4379 } 4380 4381 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4382 dest[j] = dtrace_load8(src + i); 4383 4384 dest[j] = '\0'; 4385 regs[rd] = (uintptr_t)dest; 4386 mstate->dtms_scratch_ptr += size; 4387 break; 4388 } 4389 4390 case DIF_SUBR_CLEANPATH: { 4391 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4392 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4393 uintptr_t src = tupregs[0].dttk_value; 4394 int i = 0, j = 0; 4395 4396 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4397 regs[rd] = 0; 4398 break; 4399 } 4400 4401 if (!DTRACE_INSCRATCH(mstate, size)) { 4402 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4403 regs[rd] = 0; 4404 break; 4405 } 4406 4407 /* 4408 * Move forward, loading each character. 4409 */ 4410 do { 4411 c = dtrace_load8(src + i++); 4412 next: 4413 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4414 break; 4415 4416 if (c != '/') { 4417 dest[j++] = c; 4418 continue; 4419 } 4420 4421 c = dtrace_load8(src + i++); 4422 4423 if (c == '/') { 4424 /* 4425 * We have two slashes -- we can just advance 4426 * to the next character. 4427 */ 4428 goto next; 4429 } 4430 4431 if (c != '.') { 4432 /* 4433 * This is not "." and it's not ".." -- we can 4434 * just store the "/" and this character and 4435 * drive on. 4436 */ 4437 dest[j++] = '/'; 4438 dest[j++] = c; 4439 continue; 4440 } 4441 4442 c = dtrace_load8(src + i++); 4443 4444 if (c == '/') { 4445 /* 4446 * This is a "/./" component. We're not going 4447 * to store anything in the destination buffer; 4448 * we're just going to go to the next component. 4449 */ 4450 goto next; 4451 } 4452 4453 if (c != '.') { 4454 /* 4455 * This is not ".." -- we can just store the 4456 * "/." and this character and continue 4457 * processing. 4458 */ 4459 dest[j++] = '/'; 4460 dest[j++] = '.'; 4461 dest[j++] = c; 4462 continue; 4463 } 4464 4465 c = dtrace_load8(src + i++); 4466 4467 if (c != '/' && c != '\0') { 4468 /* 4469 * This is not ".." -- it's "..[mumble]". 4470 * We'll store the "/.." and this character 4471 * and continue processing. 4472 */ 4473 dest[j++] = '/'; 4474 dest[j++] = '.'; 4475 dest[j++] = '.'; 4476 dest[j++] = c; 4477 continue; 4478 } 4479 4480 /* 4481 * This is "/../" or "/..\0". We need to back up 4482 * our destination pointer until we find a "/". 
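 *
 * (It is this step that makes, e.g., "/foo/bar/../baz" collapse to
 * "/foo/baz". Note that the cleanup is purely lexical -- no
 * filesystem state is consulted -- so in the presence of symbolic
 * links the cleaned path may refer to a different object than the
 * original did.)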
4483 */ 4484 i--; 4485 while (j != 0 && dest[--j] != '/') 4486 continue; 4487 4488 if (c == '\0') 4489 dest[++j] = '/'; 4490 } while (c != '\0'); 4491 4492 dest[j] = '\0'; 4493 regs[rd] = (uintptr_t)dest; 4494 mstate->dtms_scratch_ptr += size; 4495 break; 4496 } 4497 4498 case DIF_SUBR_INET_NTOA: 4499 case DIF_SUBR_INET_NTOA6: 4500 case DIF_SUBR_INET_NTOP: { 4501 size_t size; 4502 int af, argi, i; 4503 char *base, *end; 4504 4505 if (subr == DIF_SUBR_INET_NTOP) { 4506 af = (int)tupregs[0].dttk_value; 4507 argi = 1; 4508 } else { 4509 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4510 argi = 0; 4511 } 4512 4513 if (af == AF_INET) { 4514 ipaddr_t ip4; 4515 uint8_t *ptr8, val; 4516 4517 /* 4518 * Safely load the IPv4 address. 4519 */ 4520 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4521 4522 /* 4523 * Check an IPv4 string will fit in scratch. 4524 */ 4525 size = INET_ADDRSTRLEN; 4526 if (!DTRACE_INSCRATCH(mstate, size)) { 4527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4528 regs[rd] = 0; 4529 break; 4530 } 4531 base = (char *)mstate->dtms_scratch_ptr; 4532 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4533 4534 /* 4535 * Stringify as a dotted decimal quad. 4536 */ 4537 *end-- = '\0'; 4538 ptr8 = (uint8_t *)&ip4; 4539 for (i = 3; i >= 0; i--) { 4540 val = ptr8[i]; 4541 4542 if (val == 0) { 4543 *end-- = '0'; 4544 } else { 4545 for (; val; val /= 10) { 4546 *end-- = '0' + (val % 10); 4547 } 4548 } 4549 4550 if (i > 0) 4551 *end-- = '.'; 4552 } 4553 ASSERT(end + 1 >= base); 4554 4555 } else if (af == AF_INET6) { 4556 struct in6_addr ip6; 4557 int firstzero, tryzero, numzero, v6end; 4558 uint16_t val; 4559 const char digits[] = "0123456789abcdef"; 4560 4561 /* 4562 * Stringify using RFC 1884 convention 2 - 16 bit 4563 * hexadecimal values with a zero-run compression. 4564 * Lower case hexadecimal digits are used. 4565 * eg, fe80::214:4fff:fe0b:76c8. 4566 * The IPv4 embedded form is returned for inet_ntop, 4567 * just the IPv4 string is returned for inet_ntoa6. 4568 */ 4569 4570 /* 4571 * Safely load the IPv6 address. 4572 */ 4573 dtrace_bcopy( 4574 (void *)(uintptr_t)tupregs[argi].dttk_value, 4575 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4576 4577 /* 4578 * Check an IPv6 string will fit in scratch. 4579 */ 4580 size = INET6_ADDRSTRLEN; 4581 if (!DTRACE_INSCRATCH(mstate, size)) { 4582 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4583 regs[rd] = 0; 4584 break; 4585 } 4586 base = (char *)mstate->dtms_scratch_ptr; 4587 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4588 *end-- = '\0'; 4589 4590 /* 4591 * Find the longest run of 16 bit zero values 4592 * for the single allowed zero compression - "::". 
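 *
 * (For example, fe80:0:0:0:214:4fff:fe0b:76c8 is rendered as
 * fe80::214:4fff:fe0b:76c8; when two zero runs are of equal
 * length, the first one encountered is the one that is
 * compressed.)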
4593 */ 4594 firstzero = -1; 4595 tryzero = -1; 4596 numzero = 1; 4597 for (i = 0; i < sizeof (struct in6_addr); i++) { 4598 #if defined(sun) 4599 if (ip6._S6_un._S6_u8[i] == 0 && 4600 #else 4601 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4602 #endif 4603 tryzero == -1 && i % 2 == 0) { 4604 tryzero = i; 4605 continue; 4606 } 4607 4608 if (tryzero != -1 && 4609 #if defined(sun) 4610 (ip6._S6_un._S6_u8[i] != 0 || 4611 #else 4612 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4613 #endif 4614 i == sizeof (struct in6_addr) - 1)) { 4615 4616 if (i - tryzero <= numzero) { 4617 tryzero = -1; 4618 continue; 4619 } 4620 4621 firstzero = tryzero; 4622 numzero = i - i % 2 - tryzero; 4623 tryzero = -1; 4624 4625 #if defined(sun) 4626 if (ip6._S6_un._S6_u8[i] == 0 && 4627 #else 4628 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4629 #endif 4630 i == sizeof (struct in6_addr) - 1) 4631 numzero += 2; 4632 } 4633 } 4634 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4635 4636 /* 4637 * Check for an IPv4 embedded address. 4638 */ 4639 v6end = sizeof (struct in6_addr) - 2; 4640 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4641 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4642 for (i = sizeof (struct in6_addr) - 1; 4643 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4644 ASSERT(end >= base); 4645 4646 #if defined(sun) 4647 val = ip6._S6_un._S6_u8[i]; 4648 #else 4649 val = ip6.__u6_addr.__u6_addr8[i]; 4650 #endif 4651 4652 if (val == 0) { 4653 *end-- = '0'; 4654 } else { 4655 for (; val; val /= 10) { 4656 *end-- = '0' + val % 10; 4657 } 4658 } 4659 4660 if (i > DTRACE_V4MAPPED_OFFSET) 4661 *end-- = '.'; 4662 } 4663 4664 if (subr == DIF_SUBR_INET_NTOA6) 4665 goto inetout; 4666 4667 /* 4668 * Set v6end to skip the IPv4 address that 4669 * we have already stringified. 4670 */ 4671 v6end = 10; 4672 } 4673 4674 /* 4675 * Build the IPv6 string by working through the 4676 * address in reverse. 4677 */ 4678 for (i = v6end; i >= 0; i -= 2) { 4679 ASSERT(end >= base); 4680 4681 if (i == firstzero + numzero - 2) { 4682 *end-- = ':'; 4683 *end-- = ':'; 4684 i -= numzero - 2; 4685 continue; 4686 } 4687 4688 if (i < 14 && i != firstzero - 2) 4689 *end-- = ':'; 4690 4691 #if defined(sun) 4692 val = (ip6._S6_un._S6_u8[i] << 8) + 4693 ip6._S6_un._S6_u8[i + 1]; 4694 #else 4695 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4696 ip6.__u6_addr.__u6_addr8[i + 1]; 4697 #endif 4698 4699 if (val == 0) { 4700 *end-- = '0'; 4701 } else { 4702 for (; val; val /= 16) { 4703 *end-- = digits[val % 16]; 4704 } 4705 } 4706 } 4707 ASSERT(end + 1 >= base); 4708 4709 } else { 4710 /* 4711 * The user didn't use AF_INET or AF_INET6.
4712 */ 4713 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4714 regs[rd] = 0; 4715 break; 4716 } 4717 4718 inetout: regs[rd] = (uintptr_t)end + 1; 4719 mstate->dtms_scratch_ptr += size; 4720 break; 4721 } 4722 4723 case DIF_SUBR_MEMREF: { 4724 uintptr_t size = 2 * sizeof(uintptr_t); 4725 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4726 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4727 4728 /* address and length */ 4729 memref[0] = tupregs[0].dttk_value; 4730 memref[1] = tupregs[1].dttk_value; 4731 4732 regs[rd] = (uintptr_t) memref; 4733 mstate->dtms_scratch_ptr += scratch_size; 4734 break; 4735 } 4736 4737 case DIF_SUBR_TYPEREF: { 4738 uintptr_t size = 4 * sizeof(uintptr_t); 4739 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4740 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4741 4742 /* address, num_elements, type_str, type_len */ 4743 typeref[0] = tupregs[0].dttk_value; 4744 typeref[1] = tupregs[1].dttk_value; 4745 typeref[2] = tupregs[2].dttk_value; 4746 typeref[3] = tupregs[3].dttk_value; 4747 4748 regs[rd] = (uintptr_t) typeref; 4749 mstate->dtms_scratch_ptr += scratch_size; 4750 break; 4751 } 4752 } 4753 } 4754 4755 /* 4756 * Emulate the execution of DTrace IR instructions specified by the given 4757 * DIF object. This function is deliberately void of assertions as all of 4758 * the necessary checks are handled by a call to dtrace_difo_validate(). 4759 */ 4760 static uint64_t 4761 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4762 dtrace_vstate_t *vstate, dtrace_state_t *state) 4763 { 4764 const dif_instr_t *text = difo->dtdo_buf; 4765 const uint_t textlen = difo->dtdo_len; 4766 const char *strtab = difo->dtdo_strtab; 4767 const uint64_t *inttab = difo->dtdo_inttab; 4768 4769 uint64_t rval = 0; 4770 dtrace_statvar_t *svar; 4771 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4772 dtrace_difv_t *v; 4773 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4774 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4775 4776 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4777 uint64_t regs[DIF_DIR_NREGS]; 4778 uint64_t *tmp; 4779 4780 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4781 int64_t cc_r; 4782 uint_t pc = 0, id, opc = 0; 4783 uint8_t ttop = 0; 4784 dif_instr_t instr; 4785 uint_t r1, r2, rd; 4786 4787 /* 4788 * We stash the current DIF object into the machine state: we need it 4789 * for subsequent access checking. 
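 *
 * The emulated machine itself is deliberately simple: an integer
 * register file (regs, with %r0 hardwired to zero), a small tuple
 * stack (tupregs) used to marshal subroutine arguments and
 * associative-array keys, and RISC-style condition codes (cc_n,
 * cc_z, cc_v, cc_c) that are set by the cmp/tst opcodes and
 * consumed by the branch opcodes below.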
4790 */ 4791 mstate->dtms_difo = difo; 4792 4793 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4794 4795 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4796 opc = pc; 4797 4798 instr = text[pc++]; 4799 r1 = DIF_INSTR_R1(instr); 4800 r2 = DIF_INSTR_R2(instr); 4801 rd = DIF_INSTR_RD(instr); 4802 4803 switch (DIF_INSTR_OP(instr)) { 4804 case DIF_OP_OR: 4805 regs[rd] = regs[r1] | regs[r2]; 4806 break; 4807 case DIF_OP_XOR: 4808 regs[rd] = regs[r1] ^ regs[r2]; 4809 break; 4810 case DIF_OP_AND: 4811 regs[rd] = regs[r1] & regs[r2]; 4812 break; 4813 case DIF_OP_SLL: 4814 regs[rd] = regs[r1] << regs[r2]; 4815 break; 4816 case DIF_OP_SRL: 4817 regs[rd] = regs[r1] >> regs[r2]; 4818 break; 4819 case DIF_OP_SUB: 4820 regs[rd] = regs[r1] - regs[r2]; 4821 break; 4822 case DIF_OP_ADD: 4823 regs[rd] = regs[r1] + regs[r2]; 4824 break; 4825 case DIF_OP_MUL: 4826 regs[rd] = regs[r1] * regs[r2]; 4827 break; 4828 case DIF_OP_SDIV: 4829 if (regs[r2] == 0) { 4830 regs[rd] = 0; 4831 *flags |= CPU_DTRACE_DIVZERO; 4832 } else { 4833 regs[rd] = (int64_t)regs[r1] / 4834 (int64_t)regs[r2]; 4835 } 4836 break; 4837 4838 case DIF_OP_UDIV: 4839 if (regs[r2] == 0) { 4840 regs[rd] = 0; 4841 *flags |= CPU_DTRACE_DIVZERO; 4842 } else { 4843 regs[rd] = regs[r1] / regs[r2]; 4844 } 4845 break; 4846 4847 case DIF_OP_SREM: 4848 if (regs[r2] == 0) { 4849 regs[rd] = 0; 4850 *flags |= CPU_DTRACE_DIVZERO; 4851 } else { 4852 regs[rd] = (int64_t)regs[r1] % 4853 (int64_t)regs[r2]; 4854 } 4855 break; 4856 4857 case DIF_OP_UREM: 4858 if (regs[r2] == 0) { 4859 regs[rd] = 0; 4860 *flags |= CPU_DTRACE_DIVZERO; 4861 } else { 4862 regs[rd] = regs[r1] % regs[r2]; 4863 } 4864 break; 4865 4866 case DIF_OP_NOT: 4867 regs[rd] = ~regs[r1]; 4868 break; 4869 case DIF_OP_MOV: 4870 regs[rd] = regs[r1]; 4871 break; 4872 case DIF_OP_CMP: 4873 cc_r = regs[r1] - regs[r2]; 4874 cc_n = cc_r < 0; 4875 cc_z = cc_r == 0; 4876 cc_v = 0; 4877 cc_c = regs[r1] < regs[r2]; 4878 break; 4879 case DIF_OP_TST: 4880 cc_n = cc_v = cc_c = 0; 4881 cc_z = regs[r1] == 0; 4882 break; 4883 case DIF_OP_BA: 4884 pc = DIF_INSTR_LABEL(instr); 4885 break; 4886 case DIF_OP_BE: 4887 if (cc_z) 4888 pc = DIF_INSTR_LABEL(instr); 4889 break; 4890 case DIF_OP_BNE: 4891 if (cc_z == 0) 4892 pc = DIF_INSTR_LABEL(instr); 4893 break; 4894 case DIF_OP_BG: 4895 if ((cc_z | (cc_n ^ cc_v)) == 0) 4896 pc = DIF_INSTR_LABEL(instr); 4897 break; 4898 case DIF_OP_BGU: 4899 if ((cc_c | cc_z) == 0) 4900 pc = DIF_INSTR_LABEL(instr); 4901 break; 4902 case DIF_OP_BGE: 4903 if ((cc_n ^ cc_v) == 0) 4904 pc = DIF_INSTR_LABEL(instr); 4905 break; 4906 case DIF_OP_BGEU: 4907 if (cc_c == 0) 4908 pc = DIF_INSTR_LABEL(instr); 4909 break; 4910 case DIF_OP_BL: 4911 if (cc_n ^ cc_v) 4912 pc = DIF_INSTR_LABEL(instr); 4913 break; 4914 case DIF_OP_BLU: 4915 if (cc_c) 4916 pc = DIF_INSTR_LABEL(instr); 4917 break; 4918 case DIF_OP_BLE: 4919 if (cc_z | (cc_n ^ cc_v)) 4920 pc = DIF_INSTR_LABEL(instr); 4921 break; 4922 case DIF_OP_BLEU: 4923 if (cc_c | cc_z) 4924 pc = DIF_INSTR_LABEL(instr); 4925 break; 4926 case DIF_OP_RLDSB: 4927 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4928 *flags |= CPU_DTRACE_KPRIV; 4929 *illval = regs[r1]; 4930 break; 4931 } 4932 /*FALLTHROUGH*/ 4933 case DIF_OP_LDSB: 4934 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4935 break; 4936 case DIF_OP_RLDSH: 4937 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4938 *flags |= CPU_DTRACE_KPRIV; 4939 *illval = regs[r1]; 4940 break; 4941 } 4942 /*FALLTHROUGH*/ 4943 case DIF_OP_LDSH: 4944 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4945 break; 
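/*
 * The remaining RLD* cases follow the same pattern as RLDSB and
 * RLDSH above: unlike the plain LD* loads, they re-verify at
 * emulation time (via dtrace_canstore()) that the source address
 * lies in memory that DTrace itself controls, and flag
 * CPU_DTRACE_KPRIV instead of performing the load if it does not.
 */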
4946 case DIF_OP_RLDSW: 4947 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4948 *flags |= CPU_DTRACE_KPRIV; 4949 *illval = regs[r1]; 4950 break; 4951 } 4952 /*FALLTHROUGH*/ 4953 case DIF_OP_LDSW: 4954 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4955 break; 4956 case DIF_OP_RLDUB: 4957 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4958 *flags |= CPU_DTRACE_KPRIV; 4959 *illval = regs[r1]; 4960 break; 4961 } 4962 /*FALLTHROUGH*/ 4963 case DIF_OP_LDUB: 4964 regs[rd] = dtrace_load8(regs[r1]); 4965 break; 4966 case DIF_OP_RLDUH: 4967 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4968 *flags |= CPU_DTRACE_KPRIV; 4969 *illval = regs[r1]; 4970 break; 4971 } 4972 /*FALLTHROUGH*/ 4973 case DIF_OP_LDUH: 4974 regs[rd] = dtrace_load16(regs[r1]); 4975 break; 4976 case DIF_OP_RLDUW: 4977 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4978 *flags |= CPU_DTRACE_KPRIV; 4979 *illval = regs[r1]; 4980 break; 4981 } 4982 /*FALLTHROUGH*/ 4983 case DIF_OP_LDUW: 4984 regs[rd] = dtrace_load32(regs[r1]); 4985 break; 4986 case DIF_OP_RLDX: 4987 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4988 *flags |= CPU_DTRACE_KPRIV; 4989 *illval = regs[r1]; 4990 break; 4991 } 4992 /*FALLTHROUGH*/ 4993 case DIF_OP_LDX: 4994 regs[rd] = dtrace_load64(regs[r1]); 4995 break; 4996 case DIF_OP_ULDSB: 4997 regs[rd] = (int8_t) 4998 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4999 break; 5000 case DIF_OP_ULDSH: 5001 regs[rd] = (int16_t) 5002 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5003 break; 5004 case DIF_OP_ULDSW: 5005 regs[rd] = (int32_t) 5006 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5007 break; 5008 case DIF_OP_ULDUB: 5009 regs[rd] = 5010 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5011 break; 5012 case DIF_OP_ULDUH: 5013 regs[rd] = 5014 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5015 break; 5016 case DIF_OP_ULDUW: 5017 regs[rd] = 5018 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5019 break; 5020 case DIF_OP_ULDX: 5021 regs[rd] = 5022 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5023 break; 5024 case DIF_OP_RET: 5025 rval = regs[rd]; 5026 pc = textlen; 5027 break; 5028 case DIF_OP_NOP: 5029 break; 5030 case DIF_OP_SETX: 5031 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5032 break; 5033 case DIF_OP_SETS: 5034 regs[rd] = (uint64_t)(uintptr_t) 5035 (strtab + DIF_INSTR_STRING(instr)); 5036 break; 5037 case DIF_OP_SCMP: { 5038 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5039 uintptr_t s1 = regs[r1]; 5040 uintptr_t s2 = regs[r2]; 5041 5042 if (s1 != 0 && 5043 !dtrace_strcanload(s1, sz, mstate, vstate)) 5044 break; 5045 if (s2 != 0 && 5046 !dtrace_strcanload(s2, sz, mstate, vstate)) 5047 break; 5048 5049 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5050 5051 cc_n = cc_r < 0; 5052 cc_z = cc_r == 0; 5053 cc_v = cc_c = 0; 5054 break; 5055 } 5056 case DIF_OP_LDGA: 5057 regs[rd] = dtrace_dif_variable(mstate, state, 5058 r1, regs[r2]); 5059 break; 5060 case DIF_OP_LDGS: 5061 id = DIF_INSTR_VAR(instr); 5062 5063 if (id >= DIF_VAR_OTHER_UBASE) { 5064 uintptr_t a; 5065 5066 id -= DIF_VAR_OTHER_UBASE; 5067 svar = vstate->dtvs_globals[id]; 5068 ASSERT(svar != NULL); 5069 v = &svar->dtsv_var; 5070 5071 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5072 regs[rd] = svar->dtsv_data; 5073 break; 5074 } 5075 5076 a = (uintptr_t)svar->dtsv_data; 5077 5078 if (*(uint8_t *)a == UINT8_MAX) { 5079 /* 5080 * If the 0th byte is set to UINT8_MAX 5081 * then this is to be treated as a 5082 * reference to a NULL variable. 
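 *
 * (The first sizeof (uint64_t) bytes of a by-ref variable's
 * storage serve as this NULL flag: the STGS case below writes
 * UINT8_MAX into byte 0 when a zero is stored, and the payload
 * itself begins at a + sizeof (uint64_t).)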
5083 */ 5084 regs[rd] = 0; 5085 } else { 5086 regs[rd] = a + sizeof (uint64_t); 5087 } 5088 5089 break; 5090 } 5091 5092 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5093 break; 5094 5095 case DIF_OP_STGS: 5096 id = DIF_INSTR_VAR(instr); 5097 5098 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5099 id -= DIF_VAR_OTHER_UBASE; 5100 5101 svar = vstate->dtvs_globals[id]; 5102 ASSERT(svar != NULL); 5103 v = &svar->dtsv_var; 5104 5105 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5106 uintptr_t a = (uintptr_t)svar->dtsv_data; 5107 5108 ASSERT(a != 0); 5109 ASSERT(svar->dtsv_size != 0); 5110 5111 if (regs[rd] == 0) { 5112 *(uint8_t *)a = UINT8_MAX; 5113 break; 5114 } else { 5115 *(uint8_t *)a = 0; 5116 a += sizeof (uint64_t); 5117 } 5118 if (!dtrace_vcanload( 5119 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5120 mstate, vstate)) 5121 break; 5122 5123 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5124 (void *)a, &v->dtdv_type); 5125 break; 5126 } 5127 5128 svar->dtsv_data = regs[rd]; 5129 break; 5130 5131 case DIF_OP_LDTA: 5132 /* 5133 * There are no DTrace built-in thread-local arrays at 5134 * present. This opcode is saved for future work. 5135 */ 5136 *flags |= CPU_DTRACE_ILLOP; 5137 regs[rd] = 0; 5138 break; 5139 5140 case DIF_OP_LDLS: 5141 id = DIF_INSTR_VAR(instr); 5142 5143 if (id < DIF_VAR_OTHER_UBASE) { 5144 /* 5145 * For now, this has no meaning. 5146 */ 5147 regs[rd] = 0; 5148 break; 5149 } 5150 5151 id -= DIF_VAR_OTHER_UBASE; 5152 5153 ASSERT(id < vstate->dtvs_nlocals); 5154 ASSERT(vstate->dtvs_locals != NULL); 5155 5156 svar = vstate->dtvs_locals[id]; 5157 ASSERT(svar != NULL); 5158 v = &svar->dtsv_var; 5159 5160 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5161 uintptr_t a = (uintptr_t)svar->dtsv_data; 5162 size_t sz = v->dtdv_type.dtdt_size; 5163 5164 sz += sizeof (uint64_t); 5165 ASSERT(svar->dtsv_size == NCPU * sz); 5166 a += curcpu * sz; 5167 5168 if (*(uint8_t *)a == UINT8_MAX) { 5169 /* 5170 * If the 0th byte is set to UINT8_MAX 5171 * then this is to be treated as a 5172 * reference to a NULL variable. 
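 *
 * (Note that clause-local storage is replicated per CPU: dtsv_size
 * is NCPU * sz, and each CPU addresses its own slot at
 * a + curcpu * sz. Because probe context executes with interrupts
 * disabled, the executing CPU cannot change mid-clause and no
 * locking is required.)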
5173 */ 5174 regs[rd] = 0; 5175 } else { 5176 regs[rd] = a + sizeof (uint64_t); 5177 } 5178 5179 break; 5180 } 5181 5182 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5183 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5184 regs[rd] = tmp[curcpu]; 5185 break; 5186 5187 case DIF_OP_STLS: 5188 id = DIF_INSTR_VAR(instr); 5189 5190 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5191 id -= DIF_VAR_OTHER_UBASE; 5192 ASSERT(id < vstate->dtvs_nlocals); 5193 5194 ASSERT(vstate->dtvs_locals != NULL); 5195 svar = vstate->dtvs_locals[id]; 5196 ASSERT(svar != NULL); 5197 v = &svar->dtsv_var; 5198 5199 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5200 uintptr_t a = (uintptr_t)svar->dtsv_data; 5201 size_t sz = v->dtdv_type.dtdt_size; 5202 5203 sz += sizeof (uint64_t); 5204 ASSERT(svar->dtsv_size == NCPU * sz); 5205 a += curcpu * sz; 5206 5207 if (regs[rd] == 0) { 5208 *(uint8_t *)a = UINT8_MAX; 5209 break; 5210 } else { 5211 *(uint8_t *)a = 0; 5212 a += sizeof (uint64_t); 5213 } 5214 5215 if (!dtrace_vcanload( 5216 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5217 mstate, vstate)) 5218 break; 5219 5220 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5221 (void *)a, &v->dtdv_type); 5222 break; 5223 } 5224 5225 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5226 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5227 tmp[curcpu] = regs[rd]; 5228 break; 5229 5230 case DIF_OP_LDTS: { 5231 dtrace_dynvar_t *dvar; 5232 dtrace_key_t *key; 5233 5234 id = DIF_INSTR_VAR(instr); 5235 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5236 id -= DIF_VAR_OTHER_UBASE; 5237 v = &vstate->dtvs_tlocals[id]; 5238 5239 key = &tupregs[DIF_DTR_NREGS]; 5240 key[0].dttk_value = (uint64_t)id; 5241 key[0].dttk_size = 0; 5242 DTRACE_TLS_THRKEY(key[1].dttk_value); 5243 key[1].dttk_size = 0; 5244 5245 dvar = dtrace_dynvar(dstate, 2, key, 5246 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5247 mstate, vstate); 5248 5249 if (dvar == NULL) { 5250 regs[rd] = 0; 5251 break; 5252 } 5253 5254 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5255 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5256 } else { 5257 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5258 } 5259 5260 break; 5261 } 5262 5263 case DIF_OP_STTS: { 5264 dtrace_dynvar_t *dvar; 5265 dtrace_key_t *key; 5266 5267 id = DIF_INSTR_VAR(instr); 5268 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5269 id -= DIF_VAR_OTHER_UBASE; 5270 5271 key = &tupregs[DIF_DTR_NREGS]; 5272 key[0].dttk_value = (uint64_t)id; 5273 key[0].dttk_size = 0; 5274 DTRACE_TLS_THRKEY(key[1].dttk_value); 5275 key[1].dttk_size = 0; 5276 v = &vstate->dtvs_tlocals[id]; 5277 5278 dvar = dtrace_dynvar(dstate, 2, key, 5279 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5280 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5281 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5282 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5283 5284 /* 5285 * Given that we're storing to thread-local data, 5286 * we need to flush our predicate cache. 
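 *
 * (A cached "false" verdict for a predicate that references this
 * thread's variables -- e.g. /self->follow/ -- may be invalidated
 * by this very store, so the cached id must be discarded before
 * the variable changes.)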
5287 */ 5288 curthread->t_predcache = 0; 5289 5290 if (dvar == NULL) 5291 break; 5292 5293 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5294 if (!dtrace_vcanload( 5295 (void *)(uintptr_t)regs[rd], 5296 &v->dtdv_type, mstate, vstate)) 5297 break; 5298 5299 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5300 dvar->dtdv_data, &v->dtdv_type); 5301 } else { 5302 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5303 } 5304 5305 break; 5306 } 5307 5308 case DIF_OP_SRA: 5309 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5310 break; 5311 5312 case DIF_OP_CALL: 5313 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5314 regs, tupregs, ttop, mstate, state); 5315 break; 5316 5317 case DIF_OP_PUSHTR: 5318 if (ttop == DIF_DTR_NREGS) { 5319 *flags |= CPU_DTRACE_TUPOFLOW; 5320 break; 5321 } 5322 5323 if (r1 == DIF_TYPE_STRING) { 5324 /* 5325 * If this is a string type and the size is 0, 5326 * we'll use the system-wide default string 5327 * size. Note that we are _not_ looking at 5328 * the value of the DTRACEOPT_STRSIZE option; 5329 * had this been set, we would expect to have 5330 * a non-zero size value in the "pushtr". 5331 */ 5332 tupregs[ttop].dttk_size = 5333 dtrace_strlen((char *)(uintptr_t)regs[rd], 5334 regs[r2] ? regs[r2] : 5335 dtrace_strsize_default) + 1; 5336 } else { 5337 tupregs[ttop].dttk_size = regs[r2]; 5338 } 5339 5340 tupregs[ttop++].dttk_value = regs[rd]; 5341 break; 5342 5343 case DIF_OP_PUSHTV: 5344 if (ttop == DIF_DTR_NREGS) { 5345 *flags |= CPU_DTRACE_TUPOFLOW; 5346 break; 5347 } 5348 5349 tupregs[ttop].dttk_value = regs[rd]; 5350 tupregs[ttop++].dttk_size = 0; 5351 break; 5352 5353 case DIF_OP_POPTS: 5354 if (ttop != 0) 5355 ttop--; 5356 break; 5357 5358 case DIF_OP_FLUSHTS: 5359 ttop = 0; 5360 break; 5361 5362 case DIF_OP_LDGAA: 5363 case DIF_OP_LDTAA: { 5364 dtrace_dynvar_t *dvar; 5365 dtrace_key_t *key = tupregs; 5366 uint_t nkeys = ttop; 5367 5368 id = DIF_INSTR_VAR(instr); 5369 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5370 id -= DIF_VAR_OTHER_UBASE; 5371 5372 key[nkeys].dttk_value = (uint64_t)id; 5373 key[nkeys++].dttk_size = 0; 5374 5375 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5376 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5377 key[nkeys++].dttk_size = 0; 5378 v = &vstate->dtvs_tlocals[id]; 5379 } else { 5380 v = &vstate->dtvs_globals[id]->dtsv_var; 5381 } 5382 5383 dvar = dtrace_dynvar(dstate, nkeys, key, 5384 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5385 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5386 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5387 5388 if (dvar == NULL) { 5389 regs[rd] = 0; 5390 break; 5391 } 5392 5393 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5394 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5395 } else { 5396 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5397 } 5398 5399 break; 5400 } 5401 5402 case DIF_OP_STGAA: 5403 case DIF_OP_STTAA: { 5404 dtrace_dynvar_t *dvar; 5405 dtrace_key_t *key = tupregs; 5406 uint_t nkeys = ttop; 5407 5408 id = DIF_INSTR_VAR(instr); 5409 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5410 id -= DIF_VAR_OTHER_UBASE; 5411 5412 key[nkeys].dttk_value = (uint64_t)id; 5413 key[nkeys++].dttk_size = 0; 5414 5415 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5416 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5417 key[nkeys++].dttk_size = 0; 5418 v = &vstate->dtvs_tlocals[id]; 5419 } else { 5420 v = &vstate->dtvs_globals[id]->dtsv_var; 5421 } 5422 5423 dvar = dtrace_dynvar(dstate, nkeys, key, 5424 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5425 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5426 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5427 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5428 5429 if (dvar == NULL) 5430 break; 5431 5432 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5433 if (!dtrace_vcanload( 5434 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5435 mstate, vstate)) 5436 break; 5437 5438 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5439 dvar->dtdv_data, &v->dtdv_type); 5440 } else { 5441 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5442 } 5443 5444 break; 5445 } 5446 5447 case DIF_OP_ALLOCS: { 5448 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5449 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5450 5451 /* 5452 * Rounding up the user allocation size could have 5453 * overflowed large, bogus allocations (like -1ULL) to 5454 * 0. 5455 */ 5456 if (size < regs[r1] || 5457 !DTRACE_INSCRATCH(mstate, size)) { 5458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5459 regs[rd] = 0; 5460 break; 5461 } 5462 5463 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5464 mstate->dtms_scratch_ptr += size; 5465 regs[rd] = ptr; 5466 break; 5467 } 5468 5469 case DIF_OP_COPYS: 5470 if (!dtrace_canstore(regs[rd], regs[r2], 5471 mstate, vstate)) { 5472 *flags |= CPU_DTRACE_BADADDR; 5473 *illval = regs[rd]; 5474 break; 5475 } 5476 5477 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5478 break; 5479 5480 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5481 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5482 break; 5483 5484 case DIF_OP_STB: 5485 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5486 *flags |= CPU_DTRACE_BADADDR; 5487 *illval = regs[rd]; 5488 break; 5489 } 5490 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5491 break; 5492 5493 case DIF_OP_STH: 5494 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5495 *flags |= CPU_DTRACE_BADADDR; 5496 *illval = regs[rd]; 5497 break; 5498 } 5499 if (regs[rd] & 1) { 5500 *flags |= CPU_DTRACE_BADALIGN; 5501 *illval = regs[rd]; 5502 break; 5503 } 5504 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5505 break; 5506 5507 case DIF_OP_STW: 5508 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5509 *flags |= CPU_DTRACE_BADADDR; 5510 *illval = regs[rd]; 5511 break; 5512 } 5513 if (regs[rd] & 3) { 5514 *flags |= CPU_DTRACE_BADALIGN; 5515 *illval = regs[rd]; 5516 break; 5517 } 5518 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5519 break; 5520 5521 case DIF_OP_STX: 5522 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5523 *flags |= CPU_DTRACE_BADADDR; 5524 *illval = regs[rd]; 5525 break; 5526 } 5527 if (regs[rd] & 7) { 5528 *flags |= CPU_DTRACE_BADALIGN; 5529 *illval = regs[rd]; 5530 break; 5531 } 5532 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5533 break; 5534 } 5535 } 5536 5537 if (!(*flags & CPU_DTRACE_FAULT)) 5538 return (rval); 5539 5540 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5541 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5542 5543 return (0); 5544 } 5545 5546 static void 5547 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5548 { 5549 dtrace_probe_t *probe = ecb->dte_probe; 5550 dtrace_provider_t *prov = probe->dtpr_provider; 5551 char c[DTRACE_FULLNAMELEN + 80], *str; 5552 char *msg = "dtrace: breakpoint action at probe "; 5553 char *ecbmsg = " (ecb "; 5554 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5555 uintptr_t val = (uintptr_t)ecb; 5556 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5557 5558 if (dtrace_destructive_disallow) 5559 return; 5560 5561 /* 5562 * It's impossible to be taking action on the NULL probe. 
5563 */ 5564 ASSERT(probe != NULL); 5565 5566 /* 5567 * This is a poor man's (destitute man's?) sprintf(): we want to 5568 * print the provider name, module name, function name and name of 5569 * the probe, along with the hex address of the ECB with the breakpoint 5570 * action -- all of which we must place in the character buffer by 5571 * hand. 5572 */ 5573 while (*msg != '\0') 5574 c[i++] = *msg++; 5575 5576 for (str = prov->dtpv_name; *str != '\0'; str++) 5577 c[i++] = *str; 5578 c[i++] = ':'; 5579 5580 for (str = probe->dtpr_mod; *str != '\0'; str++) 5581 c[i++] = *str; 5582 c[i++] = ':'; 5583 5584 for (str = probe->dtpr_func; *str != '\0'; str++) 5585 c[i++] = *str; 5586 c[i++] = ':'; 5587 5588 for (str = probe->dtpr_name; *str != '\0'; str++) 5589 c[i++] = *str; 5590 5591 while (*ecbmsg != '\0') 5592 c[i++] = *ecbmsg++; 5593 5594 while (shift >= 0) { 5595 mask = (uintptr_t)0xf << shift; 5596 5597 if (val >= ((uintptr_t)1 << shift)) 5598 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5599 shift -= 4; 5600 } 5601 5602 c[i++] = ')'; 5603 c[i] = '\0'; 5604 5605 #if defined(sun) 5606 debug_enter(c); 5607 #else 5608 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5609 #endif 5610 } 5611 5612 static void 5613 dtrace_action_panic(dtrace_ecb_t *ecb) 5614 { 5615 dtrace_probe_t *probe = ecb->dte_probe; 5616 5617 /* 5618 * It's impossible to be taking action on the NULL probe. 5619 */ 5620 ASSERT(probe != NULL); 5621 5622 if (dtrace_destructive_disallow) 5623 return; 5624 5625 if (dtrace_panicked != NULL) 5626 return; 5627 5628 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5629 return; 5630 5631 /* 5632 * We won the right to panic. (We want to be sure that only one 5633 * thread calls panic() from dtrace_probe(), and that panic() is 5634 * called exactly once.) 5635 */ 5636 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5637 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5638 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5639 } 5640 5641 static void 5642 dtrace_action_raise(uint64_t sig) 5643 { 5644 if (dtrace_destructive_disallow) 5645 return; 5646 5647 if (sig >= NSIG) { 5648 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5649 return; 5650 } 5651 5652 #if defined(sun) 5653 /* 5654 * raise() has a queue depth of 1 -- we ignore all subsequent 5655 * invocations of the raise() action. 
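 *
 * For example, if a single clause executes raise(SIGUSR1) and then
 * raise(SIGUSR2) before the thread returns to user mode, only
 * SIGUSR1 is delivered; the second invocation is silently dropped.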
5656 */ 5657 if (curthread->t_dtrace_sig == 0) 5658 curthread->t_dtrace_sig = (uint8_t)sig; 5659 5660 curthread->t_sig_check = 1; 5661 aston(curthread); 5662 #else 5663 struct proc *p = curproc; 5664 PROC_LOCK(p); 5665 psignal(p, sig); 5666 PROC_UNLOCK(p); 5667 #endif 5668 } 5669 5670 static void 5671 dtrace_action_stop(void) 5672 { 5673 if (dtrace_destructive_disallow) 5674 return; 5675 5676 #if defined(sun) 5677 if (!curthread->t_dtrace_stop) { 5678 curthread->t_dtrace_stop = 1; 5679 curthread->t_sig_check = 1; 5680 aston(curthread); 5681 } 5682 #else 5683 struct proc *p = curproc; 5684 PROC_LOCK(p); 5685 psignal(p, SIGSTOP); 5686 PROC_UNLOCK(p); 5687 #endif 5688 } 5689 5690 static void 5691 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5692 { 5693 hrtime_t now; 5694 volatile uint16_t *flags; 5695 #if defined(sun) 5696 cpu_t *cpu = CPU; 5697 #else 5698 cpu_t *cpu = &solaris_cpu[curcpu]; 5699 #endif 5700 5701 if (dtrace_destructive_disallow) 5702 return; 5703 5704 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5705 5706 now = dtrace_gethrtime(); 5707 5708 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5709 /* 5710 * We need to advance the mark to the current time. 5711 */ 5712 cpu->cpu_dtrace_chillmark = now; 5713 cpu->cpu_dtrace_chilled = 0; 5714 } 5715 5716 /* 5717 * Now check to see if the requested chill time would take us over 5718 * the maximum amount of time allowed in the chill interval. (Or 5719 * worse, if the calculation itself induces overflow.) 5720 */ 5721 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5722 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5723 *flags |= CPU_DTRACE_ILLOP; 5724 return; 5725 } 5726 5727 while (dtrace_gethrtime() - now < val) 5728 continue; 5729 5730 /* 5731 * Normally, we assure that the value of the variable "timestamp" does 5732 * not change within an ECB. The presence of chill() represents an 5733 * exception to this rule, however. 5734 */ 5735 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5736 cpu->cpu_dtrace_chilled += val; 5737 } 5738 5739 #if defined(sun) 5740 static void 5741 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5742 uint64_t *buf, uint64_t arg) 5743 { 5744 int nframes = DTRACE_USTACK_NFRAMES(arg); 5745 int strsize = DTRACE_USTACK_STRSIZE(arg); 5746 uint64_t *pcs = &buf[1], *fps; 5747 char *str = (char *)&pcs[nframes]; 5748 int size, offs = 0, i, j; 5749 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5750 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5751 char *sym; 5752 5753 /* 5754 * Should be taking a faster path if string space has not been 5755 * allocated. 5756 */ 5757 ASSERT(strsize != 0); 5758 5759 /* 5760 * We will first allocate some temporary space for the frame pointers. 5761 */ 5762 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5763 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5764 (nframes * sizeof (uint64_t)); 5765 5766 if (!DTRACE_INSCRATCH(mstate, size)) { 5767 /* 5768 * Not enough room for our frame pointers -- need to indicate 5769 * that we ran out of scratch space. 5770 */ 5771 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5772 return; 5773 } 5774 5775 mstate->dtms_scratch_ptr += size; 5776 saved = mstate->dtms_scratch_ptr; 5777 5778 /* 5779 * Now get a stack with both program counters and frame pointers. 
5780 */ 5781 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5782 dtrace_getufpstack(buf, fps, nframes + 1); 5783 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5784 5785 /* 5786 * If that faulted, we're cooked. 5787 */ 5788 if (*flags & CPU_DTRACE_FAULT) 5789 goto out; 5790 5791 /* 5792 * Now we want to walk up the stack, calling the USTACK helper. For 5793 * each iteration, we restore the scratch pointer. 5794 */ 5795 for (i = 0; i < nframes; i++) { 5796 mstate->dtms_scratch_ptr = saved; 5797 5798 if (offs >= strsize) 5799 break; 5800 5801 sym = (char *)(uintptr_t)dtrace_helper( 5802 DTRACE_HELPER_ACTION_USTACK, 5803 mstate, state, pcs[i], fps[i]); 5804 5805 /* 5806 * If we faulted while running the helper, we're going to 5807 * clear the fault and null out the corresponding string. 5808 */ 5809 if (*flags & CPU_DTRACE_FAULT) { 5810 *flags &= ~CPU_DTRACE_FAULT; 5811 str[offs++] = '\0'; 5812 continue; 5813 } 5814 5815 if (sym == NULL) { 5816 str[offs++] = '\0'; 5817 continue; 5818 } 5819 5820 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5821 5822 /* 5823 * Now copy in the string that the helper returned to us. 5824 */ 5825 for (j = 0; offs + j < strsize; j++) { 5826 if ((str[offs + j] = sym[j]) == '\0') 5827 break; 5828 } 5829 5830 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5831 5832 offs += j + 1; 5833 } 5834 5835 if (offs >= strsize) { 5836 /* 5837 * If we didn't have room for all of the strings, we don't 5838 * abort processing -- this needn't be a fatal error -- but we 5839 * still want to increment a counter (dts_stkstroverflows) to 5840 * allow this condition to be warned about. (If this is from 5841 * a jstack() action, it is easily tuned via jstackstrsize.) 5842 */ 5843 dtrace_error(&state->dts_stkstroverflows); 5844 } 5845 5846 while (offs < strsize) 5847 str[offs++] = '\0'; 5848 5849 out: 5850 mstate->dtms_scratch_ptr = old; 5851 } 5852 #endif 5853 5854 /* 5855 * If you're looking for the epicenter of DTrace, you just found it. This 5856 * is the function called by the provider to fire a probe -- from which all 5857 * subsequent probe-context DTrace activity emanates. 5858 */ 5859 void 5860 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5861 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5862 { 5863 processorid_t cpuid; 5864 dtrace_icookie_t cookie; 5865 dtrace_probe_t *probe; 5866 dtrace_mstate_t mstate; 5867 dtrace_ecb_t *ecb; 5868 dtrace_action_t *act; 5869 intptr_t offs; 5870 size_t size; 5871 int vtime, onintr; 5872 volatile uint16_t *flags; 5873 hrtime_t now; 5874 5875 #if defined(sun) 5876 /* 5877 * Kick out immediately if this CPU is still being born (in which case 5878 * curthread will be set to -1) or the current thread can't allow 5879 * probes in its current context. 5880 */ 5881 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5882 return; 5883 #endif 5884 5885 cookie = dtrace_interrupt_disable(); 5886 probe = dtrace_probes[id - 1]; 5887 cpuid = curcpu; 5888 onintr = CPU_ON_INTR(CPU); 5889 5890 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5891 probe->dtpr_predcache == curthread->t_predcache) { 5892 /* 5893 * We have hit in the predicate cache; we know that 5894 * this predicate would evaluate to be false. 5895 */ 5896 dtrace_interrupt_enable(cookie); 5897 return; 5898 } 5899 5900 #if defined(sun) 5901 if (panic_quiesce) { 5902 #else 5903 if (panicstr != NULL) { 5904 #endif 5905 /* 5906 * We don't trace anything if we're panicking. 
5907 */ 5908 dtrace_interrupt_enable(cookie); 5909 return; 5910 } 5911 5912 now = dtrace_gethrtime(); 5913 vtime = dtrace_vtime_references != 0; 5914 5915 if (vtime && curthread->t_dtrace_start) 5916 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5917 5918 mstate.dtms_difo = NULL; 5919 mstate.dtms_probe = probe; 5920 mstate.dtms_strtok = 0; 5921 mstate.dtms_arg[0] = arg0; 5922 mstate.dtms_arg[1] = arg1; 5923 mstate.dtms_arg[2] = arg2; 5924 mstate.dtms_arg[3] = arg3; 5925 mstate.dtms_arg[4] = arg4; 5926 5927 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5928 5929 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5930 dtrace_predicate_t *pred = ecb->dte_predicate; 5931 dtrace_state_t *state = ecb->dte_state; 5932 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5933 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5934 dtrace_vstate_t *vstate = &state->dts_vstate; 5935 dtrace_provider_t *prov = probe->dtpr_provider; 5936 int committed = 0; 5937 caddr_t tomax; 5938 5939 /* 5940 * A little subtlety with the following (seemingly innocuous) 5941 * declaration of the automatic 'val': by looking at the 5942 * code, you might think that it could be declared in the 5943 * action processing loop, below. (That is, it's only used in 5944 * the action processing loop.) However, it must be declared 5945 * out of that scope because in the case of DIF expression 5946 * arguments to aggregating actions, one iteration of the 5947 * action loop will use the last iteration's value. 5948 */ 5949 uint64_t val = 0; 5950 5951 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5952 *flags &= ~CPU_DTRACE_ERROR; 5953 5954 if (prov == dtrace_provider) { 5955 /* 5956 * If dtrace itself is the provider of this probe, 5957 * we're only going to continue processing the ECB if 5958 * arg0 (the dtrace_state_t) is equal to the ECB's 5959 * creating state. (This prevents disjoint consumers 5960 * from seeing one another's metaprobes.) 5961 */ 5962 if (arg0 != (uint64_t)(uintptr_t)state) 5963 continue; 5964 } 5965 5966 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5967 /* 5968 * We're not currently active. If our provider isn't 5969 * the dtrace pseudo provider, we're not interested. 5970 */ 5971 if (prov != dtrace_provider) 5972 continue; 5973 5974 /* 5975 * Now we must further check if we are in the BEGIN 5976 * probe. If we are, we will only continue processing 5977 * if we're still in WARMUP -- if one BEGIN enabling 5978 * has invoked the exit() action, we don't want to 5979 * evaluate subsequent BEGIN enablings. 5980 */ 5981 if (probe->dtpr_id == dtrace_probeid_begin && 5982 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5983 ASSERT(state->dts_activity == 5984 DTRACE_ACTIVITY_DRAINING); 5985 continue; 5986 } 5987 } 5988 5989 if (ecb->dte_cond) { 5990 /* 5991 * If the dte_cond bits indicate that this 5992 * consumer is only allowed to see user-mode firings 5993 * of this probe, call the provider's dtps_usermode() 5994 * entry point to check that the probe was fired 5995 * while in a user context. Skip this ECB if that's 5996 * not the case. 5997 */ 5998 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5999 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6000 probe->dtpr_id, probe->dtpr_arg) == 0) 6001 continue; 6002 6003 #if defined(sun) 6004 /* 6005 * This is more subtle than it looks. 
We have to be 6006 * absolutely certain that CRED() isn't going to 6007 * change out from under us so it's only legit to 6008 * examine that structure if we're in constrained 6009 * situations. Currently, the only times we'll do this 6010 * check is if a non-super-user has enabled the 6011 * profile or syscall providers -- providers that 6012 * allow visibility of all processes. For the 6013 * profile case, the check above will ensure that 6014 * we're examining a user context. 6015 */ 6016 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6017 cred_t *cr; 6018 cred_t *s_cr = 6019 ecb->dte_state->dts_cred.dcr_cred; 6020 proc_t *proc; 6021 6022 ASSERT(s_cr != NULL); 6023 6024 if ((cr = CRED()) == NULL || 6025 s_cr->cr_uid != cr->cr_uid || 6026 s_cr->cr_uid != cr->cr_ruid || 6027 s_cr->cr_uid != cr->cr_suid || 6028 s_cr->cr_gid != cr->cr_gid || 6029 s_cr->cr_gid != cr->cr_rgid || 6030 s_cr->cr_gid != cr->cr_sgid || 6031 (proc = ttoproc(curthread)) == NULL || 6032 (proc->p_flag & SNOCD)) 6033 continue; 6034 } 6035 6036 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6037 cred_t *cr; 6038 cred_t *s_cr = 6039 ecb->dte_state->dts_cred.dcr_cred; 6040 6041 ASSERT(s_cr != NULL); 6042 6043 if ((cr = CRED()) == NULL || 6044 s_cr->cr_zone->zone_id != 6045 cr->cr_zone->zone_id) 6046 continue; 6047 } 6048 #endif 6049 } 6050 6051 if (now - state->dts_alive > dtrace_deadman_timeout) { 6052 /* 6053 * We seem to be dead. Unless we (a) have kernel 6054 * destructive permissions, (b) have explicitly enabled 6055 * destructive actions, and (c) destructive actions have 6056 * not been disabled, we're going to transition into 6057 * the KILLED state, from which no further processing 6058 * on this state will be performed. 6059 */ 6060 if (!dtrace_priv_kernel_destructive(state) || 6061 !state->dts_cred.dcr_destructive || 6062 dtrace_destructive_disallow) { 6063 void *activity = &state->dts_activity; 6064 dtrace_activity_t current; 6065 6066 do { 6067 current = state->dts_activity; 6068 } while (dtrace_cas32(activity, current, 6069 DTRACE_ACTIVITY_KILLED) != current); 6070 6071 continue; 6072 } 6073 } 6074 6075 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6076 ecb->dte_alignment, state, &mstate)) < 0) 6077 continue; 6078 6079 tomax = buf->dtb_tomax; 6080 ASSERT(tomax != NULL); 6081 6082 if (ecb->dte_size != 0) 6083 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6084 6085 mstate.dtms_epid = ecb->dte_epid; 6086 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6087 6088 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6089 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6090 else 6091 mstate.dtms_access = 0; 6092 6093 if (pred != NULL) { 6094 dtrace_difo_t *dp = pred->dtp_difo; 6095 int rval; 6096 6097 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6098 6099 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6100 dtrace_cacheid_t cid = probe->dtpr_predcache; 6101 6102 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6103 /* 6104 * Update the predicate cache...
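* A predicate is only given a cache ID when its
* result depends solely on thread-invariant state.
* For illustration, a (hypothetical) enabling such as
*
*	syscall:::entry /pid == 1234/ { @c[probefunc] = count(); }
*
* evaluates identically on every firing within a given
* thread; once it is known to be false for this thread,
* the ID cached in t_predcache lets the fast path at the
* top of dtrace_probe() return without processing the
* probe at all.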
6105 */ 6106 ASSERT(cid == pred->dtp_cacheid); 6107 curthread->t_predcache = cid; 6108 } 6109 6110 continue; 6111 } 6112 } 6113 6114 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6115 act != NULL; act = act->dta_next) { 6116 size_t valoffs; 6117 dtrace_difo_t *dp; 6118 dtrace_recdesc_t *rec = &act->dta_rec; 6119 6120 size = rec->dtrd_size; 6121 valoffs = offs + rec->dtrd_offset; 6122 6123 if (DTRACEACT_ISAGG(act->dta_kind)) { 6124 uint64_t v = 0xbad; 6125 dtrace_aggregation_t *agg; 6126 6127 agg = (dtrace_aggregation_t *)act; 6128 6129 if ((dp = act->dta_difo) != NULL) 6130 v = dtrace_dif_emulate(dp, 6131 &mstate, vstate, state); 6132 6133 if (*flags & CPU_DTRACE_ERROR) 6134 continue; 6135 6136 /* 6137 * Note that we always pass the expression 6138 * value from the previous iteration of the 6139 * action loop. This value will only be used 6140 * if there is an expression argument to the 6141 * aggregating action, denoted by the 6142 * dtag_hasarg field. 6143 */ 6144 dtrace_aggregate(agg, buf, 6145 offs, aggbuf, v, val); 6146 continue; 6147 } 6148 6149 switch (act->dta_kind) { 6150 case DTRACEACT_STOP: 6151 if (dtrace_priv_proc_destructive(state)) 6152 dtrace_action_stop(); 6153 continue; 6154 6155 case DTRACEACT_BREAKPOINT: 6156 if (dtrace_priv_kernel_destructive(state)) 6157 dtrace_action_breakpoint(ecb); 6158 continue; 6159 6160 case DTRACEACT_PANIC: 6161 if (dtrace_priv_kernel_destructive(state)) 6162 dtrace_action_panic(ecb); 6163 continue; 6164 6165 case DTRACEACT_STACK: 6166 if (!dtrace_priv_kernel(state)) 6167 continue; 6168 6169 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6170 size / sizeof (pc_t), probe->dtpr_aframes, 6171 DTRACE_ANCHORED(probe) ? NULL : 6172 (uint32_t *)arg0); 6173 continue; 6174 6175 #if defined(sun) 6176 case DTRACEACT_JSTACK: 6177 case DTRACEACT_USTACK: 6178 if (!dtrace_priv_proc(state)) 6179 continue; 6180 6181 /* 6182 * See comment in DIF_VAR_PID. 6183 */ 6184 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6185 CPU_ON_INTR(CPU)) { 6186 int depth = DTRACE_USTACK_NFRAMES( 6187 rec->dtrd_arg) + 1; 6188 6189 dtrace_bzero((void *)(tomax + valoffs), 6190 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6191 + depth * sizeof (uint64_t)); 6192 6193 continue; 6194 } 6195 6196 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6197 curproc->p_dtrace_helpers != NULL) { 6198 /* 6199 * This is the slow path -- we have 6200 * allocated string space, and we're 6201 * getting the stack of a process that 6202 * has helpers. Call into a separate 6203 * routine to perform this processing. 
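* (This is the jstack() case: a virtual-machine
* runtime may have registered USTACK helpers that
* translate raw frames into symbolic names, and
* dtrace_action_ustack() walks the user stack,
* invoking those helpers for each frame.)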
6204 */ 6205 dtrace_action_ustack(&mstate, state, 6206 (uint64_t *)(tomax + valoffs), 6207 rec->dtrd_arg); 6208 continue; 6209 } 6210 6211 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6212 dtrace_getupcstack((uint64_t *) 6213 (tomax + valoffs), 6214 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6215 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6216 continue; 6217 #endif 6218 6219 default: 6220 break; 6221 } 6222 6223 dp = act->dta_difo; 6224 ASSERT(dp != NULL); 6225 6226 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6227 6228 if (*flags & CPU_DTRACE_ERROR) 6229 continue; 6230 6231 switch (act->dta_kind) { 6232 case DTRACEACT_SPECULATE: 6233 ASSERT(buf == &state->dts_buffer[cpuid]); 6234 buf = dtrace_speculation_buffer(state, 6235 cpuid, val); 6236 6237 if (buf == NULL) { 6238 *flags |= CPU_DTRACE_DROP; 6239 continue; 6240 } 6241 6242 offs = dtrace_buffer_reserve(buf, 6243 ecb->dte_needed, ecb->dte_alignment, 6244 state, NULL); 6245 6246 if (offs < 0) { 6247 *flags |= CPU_DTRACE_DROP; 6248 continue; 6249 } 6250 6251 tomax = buf->dtb_tomax; 6252 ASSERT(tomax != NULL); 6253 6254 if (ecb->dte_size != 0) 6255 DTRACE_STORE(uint32_t, tomax, offs, 6256 ecb->dte_epid); 6257 continue; 6258 6259 case DTRACEACT_PRINTM: { 6260 /* The DIF returns a 'memref'. */ 6261 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6262 6263 /* Get the size from the memref. */ 6264 size = memref[1]; 6265 6266 /* 6267 * Check if the size exceeds the allocated 6268 * buffer size. 6269 */ 6270 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6271 /* Flag a drop! */ 6272 *flags |= CPU_DTRACE_DROP; 6273 continue; 6274 } 6275 6276 /* Store the size in the buffer first. */ 6277 DTRACE_STORE(uintptr_t, tomax, 6278 valoffs, size); 6279 6280 /* 6281 * Offset the buffer address to the start 6282 * of the data. 6283 */ 6284 valoffs += sizeof(uintptr_t); 6285 6286 /* 6287 * Reset to the memory address rather than 6288 * the memref array, then let the BYREF 6289 * code below do the work to store the 6290 * memory data in the buffer. 6291 */ 6292 val = memref[0]; 6293 break; 6294 } 6295 6296 case DTRACEACT_PRINTT: { 6297 /* The DIF returns a 'typeref'. */ 6298 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6299 char c = '\0' + 1; 6300 size_t s; 6301 6302 /* 6303 * Get the type string length and round it 6304 * up so that the data that follows is 6305 * aligned for easy access. 6306 */ 6307 size_t typs = strlen((char *) typeref[2]) + 1; 6308 typs = roundup(typs, sizeof(uintptr_t)); 6309 6310 /* 6311 * Get the size from the typeref using the 6312 * number of elements and the type size. 6313 */ 6314 size = typeref[1] * typeref[3]; 6315 6316 /* 6317 * Check if the size exceeds the allocated 6318 * buffer size. 6319 */ 6320 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6321 /* Flag a drop! */ 6322 *flags |= CPU_DTRACE_DROP; 6323 continue; 6324 } 6325 6326 /* Store the size in the buffer first. */ 6327 DTRACE_STORE(uintptr_t, tomax, 6328 valoffs, size); 6329 valoffs += sizeof(uintptr_t); 6330 6331 /* Store the type size in the buffer. */ 6332 DTRACE_STORE(uintptr_t, tomax, 6333 valoffs, typeref[3]); 6334 valoffs += sizeof(uintptr_t); 6335 6336 val = typeref[2]; 6337 6338 for (s = 0; s < typs; s++) { 6339 if (c != '\0') 6340 c = dtrace_load8(val++); 6341 6342 DTRACE_STORE(uint8_t, tomax, 6343 valoffs++, c); 6344 } 6345 6346 /* 6347 * Reset to the memory address rather than 6348 * the typeref array, then let the BYREF 6349 * code below do the work to store the 6350 * memory data in the buffer.
6351 */ 6352 val = typeref[0]; 6353 break; 6354 } 6355 6356 case DTRACEACT_CHILL: 6357 if (dtrace_priv_kernel_destructive(state)) 6358 dtrace_action_chill(&mstate, val); 6359 continue; 6360 6361 case DTRACEACT_RAISE: 6362 if (dtrace_priv_proc_destructive(state)) 6363 dtrace_action_raise(val); 6364 continue; 6365 6366 case DTRACEACT_COMMIT: 6367 ASSERT(!committed); 6368 6369 /* 6370 * We need to commit our buffer state. 6371 */ 6372 if (ecb->dte_size) 6373 buf->dtb_offset = offs + ecb->dte_size; 6374 buf = &state->dts_buffer[cpuid]; 6375 dtrace_speculation_commit(state, cpuid, val); 6376 committed = 1; 6377 continue; 6378 6379 case DTRACEACT_DISCARD: 6380 dtrace_speculation_discard(state, cpuid, val); 6381 continue; 6382 6383 case DTRACEACT_DIFEXPR: 6384 case DTRACEACT_LIBACT: 6385 case DTRACEACT_PRINTF: 6386 case DTRACEACT_PRINTA: 6387 case DTRACEACT_SYSTEM: 6388 case DTRACEACT_FREOPEN: 6389 break; 6390 6391 case DTRACEACT_SYM: 6392 case DTRACEACT_MOD: 6393 if (!dtrace_priv_kernel(state)) 6394 continue; 6395 break; 6396 6397 case DTRACEACT_USYM: 6398 case DTRACEACT_UMOD: 6399 case DTRACEACT_UADDR: { 6400 #if defined(sun) 6401 struct pid *pid = curthread->t_procp->p_pidp; 6402 #endif 6403 6404 if (!dtrace_priv_proc(state)) 6405 continue; 6406 6407 DTRACE_STORE(uint64_t, tomax, 6408 #if defined(sun) 6409 valoffs, (uint64_t)pid->pid_id); 6410 #else 6411 valoffs, (uint64_t) curproc->p_pid); 6412 #endif 6413 DTRACE_STORE(uint64_t, tomax, 6414 valoffs + sizeof (uint64_t), val); 6415 6416 continue; 6417 } 6418 6419 case DTRACEACT_EXIT: { 6420 /* 6421 * For the exit action, we are going to attempt 6422 * to atomically set our activity to be 6423 * draining. If this fails (either because 6424 * another CPU has beat us to the exit action, 6425 * or because our current activity is something 6426 * other than ACTIVE or WARMUP), we will 6427 * continue. This assures that the exit action 6428 * can be successfully recorded at most once 6429 * when we're in the ACTIVE state. If we're 6430 * encountering the exit() action while in 6431 * COOLDOWN, however, we want to honor the new 6432 * status code. (We know that we're the only 6433 * thread in COOLDOWN, so there is no race.) 6434 */ 6435 void *activity = &state->dts_activity; 6436 dtrace_activity_t current = state->dts_activity; 6437 6438 if (current == DTRACE_ACTIVITY_COOLDOWN) 6439 break; 6440 6441 if (current != DTRACE_ACTIVITY_WARMUP) 6442 current = DTRACE_ACTIVITY_ACTIVE; 6443 6444 if (dtrace_cas32(activity, current, 6445 DTRACE_ACTIVITY_DRAINING) != current) { 6446 *flags |= CPU_DTRACE_DROP; 6447 continue; 6448 } 6449 6450 break; 6451 } 6452 6453 default: 6454 ASSERT(0); 6455 } 6456 6457 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6458 uintptr_t end = valoffs + size; 6459 6460 if (!dtrace_vcanload((void *)(uintptr_t)val, 6461 &dp->dtdo_rtype, &mstate, vstate)) 6462 continue; 6463 6464 /* 6465 * If this is a string, we're going to only 6466 * load until we find the zero byte -- after 6467 * which we'll store zero bytes. 
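* (For instance, storing the string "ok" into an
* 8-byte string record yields 'o', 'k', and six zero
* bytes; the zero-fill keeps the record's contents
* deterministic regardless of what happens to follow
* the string in memory.)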
6468 */ 6469 if (dp->dtdo_rtype.dtdt_kind == 6470 DIF_TYPE_STRING) { 6471 char c = '\0' + 1; 6472 int intuple = act->dta_intuple; 6473 size_t s; 6474 6475 for (s = 0; s < size; s++) { 6476 if (c != '\0') 6477 c = dtrace_load8(val++); 6478 6479 DTRACE_STORE(uint8_t, tomax, 6480 valoffs++, c); 6481 6482 if (c == '\0' && intuple) 6483 break; 6484 } 6485 6486 continue; 6487 } 6488 6489 while (valoffs < end) { 6490 DTRACE_STORE(uint8_t, tomax, valoffs++, 6491 dtrace_load8(val++)); 6492 } 6493 6494 continue; 6495 } 6496 6497 switch (size) { 6498 case 0: 6499 break; 6500 6501 case sizeof (uint8_t): 6502 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6503 break; 6504 case sizeof (uint16_t): 6505 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6506 break; 6507 case sizeof (uint32_t): 6508 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6509 break; 6510 case sizeof (uint64_t): 6511 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6512 break; 6513 default: 6514 /* 6515 * Any other size should have been returned by 6516 * reference, not by value. 6517 */ 6518 ASSERT(0); 6519 break; 6520 } 6521 } 6522 6523 if (*flags & CPU_DTRACE_DROP) 6524 continue; 6525 6526 if (*flags & CPU_DTRACE_FAULT) { 6527 int ndx; 6528 dtrace_action_t *err; 6529 6530 buf->dtb_errors++; 6531 6532 if (probe->dtpr_id == dtrace_probeid_error) { 6533 /* 6534 * There's nothing we can do -- we had an 6535 * error on the error probe. We bump an 6536 * error counter to at least indicate that 6537 * this condition happened. 6538 */ 6539 dtrace_error(&state->dts_dblerrors); 6540 continue; 6541 } 6542 6543 if (vtime) { 6544 /* 6545 * Before recursing on dtrace_probe(), we 6546 * need to explicitly clear out our start 6547 * time to prevent it from being accumulated 6548 * into t_dtrace_vtime. 6549 */ 6550 curthread->t_dtrace_start = 0; 6551 } 6552 6553 /* 6554 * Iterate over the actions to figure out which action 6555 * we were processing when we experienced the error. 6556 * Note that act points _past_ the faulting action; if 6557 * act is ecb->dte_action, the fault was in the 6558 * predicate, if it's ecb->dte_action->dta_next it's 6559 * in action #1, and so on. 6560 */ 6561 for (err = ecb->dte_action, ndx = 0; 6562 err != act; err = err->dta_next, ndx++) 6563 continue; 6564 6565 dtrace_probe_error(state, ecb->dte_epid, ndx, 6566 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6567 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6568 cpu_core[cpuid].cpuc_dtrace_illval); 6569 6570 continue; 6571 } 6572 6573 if (!committed) 6574 buf->dtb_offset = offs + ecb->dte_size; 6575 } 6576 6577 if (vtime) 6578 curthread->t_dtrace_start = dtrace_gethrtime(); 6579 6580 dtrace_interrupt_enable(cookie); 6581 } 6582 6583 /* 6584 * DTrace Probe Hashing Functions 6585 * 6586 * The functions in this section (and indeed, the functions in remaining 6587 * sections) are not _called_ from probe context. (Any exceptions to this are 6588 * marked with a "Note:".) Rather, they are called from elsewhere in the 6589 * DTrace framework to look-up probes in, add probes to and remove probes from 6590 * the DTrace probe hashes. (Each probe is hashed by each element of the 6591 * probe tuple -- allowing for fast lookups, regardless of what was 6592 * specified.) 
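* For example, a description such as fbt::vn_open:entry pins the
* function and name elements of the tuple; dtrace_match() below can
* then walk whichever of the corresponding hash chains has the
* fewest collisions instead of scanning every probe in the system.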
6593 */ 6594 static uint_t 6595 dtrace_hash_str(const char *p) 6596 { 6597 unsigned int g; 6598 uint_t hval = 0; 6599 6600 while (*p) { 6601 hval = (hval << 4) + *p++; 6602 if ((g = (hval & 0xf0000000)) != 0) 6603 hval ^= g >> 24; 6604 hval &= ~g; 6605 } 6606 return (hval); 6607 } 6608 6609 static dtrace_hash_t * 6610 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6611 { 6612 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6613 6614 hash->dth_stroffs = stroffs; 6615 hash->dth_nextoffs = nextoffs; 6616 hash->dth_prevoffs = prevoffs; 6617 6618 hash->dth_size = 1; 6619 hash->dth_mask = hash->dth_size - 1; 6620 6621 hash->dth_tab = kmem_zalloc(hash->dth_size * 6622 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6623 6624 return (hash); 6625 } 6626 6627 static void 6628 dtrace_hash_destroy(dtrace_hash_t *hash) 6629 { 6630 #ifdef DEBUG 6631 int i; 6632 6633 for (i = 0; i < hash->dth_size; i++) 6634 ASSERT(hash->dth_tab[i] == NULL); 6635 #endif 6636 6637 kmem_free(hash->dth_tab, 6638 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6639 kmem_free(hash, sizeof (dtrace_hash_t)); 6640 } 6641 6642 static void 6643 dtrace_hash_resize(dtrace_hash_t *hash) 6644 { 6645 int size = hash->dth_size, i, ndx; 6646 int new_size = hash->dth_size << 1; 6647 int new_mask = new_size - 1; 6648 dtrace_hashbucket_t **new_tab, *bucket, *next; 6649 6650 ASSERT((new_size & new_mask) == 0); 6651 6652 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6653 6654 for (i = 0; i < size; i++) { 6655 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6656 dtrace_probe_t *probe = bucket->dthb_chain; 6657 6658 ASSERT(probe != NULL); 6659 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6660 6661 next = bucket->dthb_next; 6662 bucket->dthb_next = new_tab[ndx]; 6663 new_tab[ndx] = bucket; 6664 } 6665 } 6666 6667 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6668 hash->dth_tab = new_tab; 6669 hash->dth_size = new_size; 6670 hash->dth_mask = new_mask; 6671 } 6672 6673 static void 6674 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6675 { 6676 int hashval = DTRACE_HASHSTR(hash, new); 6677 int ndx = hashval & hash->dth_mask; 6678 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6679 dtrace_probe_t **nextp, **prevp; 6680 6681 for (; bucket != NULL; bucket = bucket->dthb_next) { 6682 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6683 goto add; 6684 } 6685 6686 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6687 dtrace_hash_resize(hash); 6688 dtrace_hash_add(hash, new); 6689 return; 6690 } 6691 6692 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6693 bucket->dthb_next = hash->dth_tab[ndx]; 6694 hash->dth_tab[ndx] = bucket; 6695 hash->dth_nbuckets++; 6696 6697 add: 6698 nextp = DTRACE_HASHNEXT(hash, new); 6699 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6700 *nextp = bucket->dthb_chain; 6701 6702 if (bucket->dthb_chain != NULL) { 6703 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6704 ASSERT(*prevp == NULL); 6705 *prevp = new; 6706 } 6707 6708 bucket->dthb_chain = new; 6709 bucket->dthb_len++; 6710 } 6711 6712 static dtrace_probe_t * 6713 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6714 { 6715 int hashval = DTRACE_HASHSTR(hash, template); 6716 int ndx = hashval & hash->dth_mask; 6717 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6718 6719 for (; bucket != NULL; bucket = bucket->dthb_next) { 6720 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6721 return 
(bucket->dthb_chain); 6722 } 6723 6724 return (NULL); 6725 } 6726 6727 static int 6728 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6729 { 6730 int hashval = DTRACE_HASHSTR(hash, template); 6731 int ndx = hashval & hash->dth_mask; 6732 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6733 6734 for (; bucket != NULL; bucket = bucket->dthb_next) { 6735 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6736 return (bucket->dthb_len); 6737 } 6738 6739 return (0); 6740 } 6741 6742 static void 6743 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6744 { 6745 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6746 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6747 6748 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6749 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6750 6751 /* 6752 * Find the bucket that we're removing this probe from. 6753 */ 6754 for (; bucket != NULL; bucket = bucket->dthb_next) { 6755 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6756 break; 6757 } 6758 6759 ASSERT(bucket != NULL); 6760 6761 if (*prevp == NULL) { 6762 if (*nextp == NULL) { 6763 /* 6764 * The removed probe was the only probe on this 6765 * bucket; we need to remove the bucket. 6766 */ 6767 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6768 6769 ASSERT(bucket->dthb_chain == probe); 6770 ASSERT(b != NULL); 6771 6772 if (b == bucket) { 6773 hash->dth_tab[ndx] = bucket->dthb_next; 6774 } else { 6775 while (b->dthb_next != bucket) 6776 b = b->dthb_next; 6777 b->dthb_next = bucket->dthb_next; 6778 } 6779 6780 ASSERT(hash->dth_nbuckets > 0); 6781 hash->dth_nbuckets--; 6782 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6783 return; 6784 } 6785 6786 bucket->dthb_chain = *nextp; 6787 } else { 6788 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6789 } 6790 6791 if (*nextp != NULL) 6792 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6793 } 6794 6795 /* 6796 * DTrace Utility Functions 6797 * 6798 * These are random utility functions that are _not_ called from probe context. 6799 */ 6800 static int 6801 dtrace_badattr(const dtrace_attribute_t *a) 6802 { 6803 return (a->dtat_name > DTRACE_STABILITY_MAX || 6804 a->dtat_data > DTRACE_STABILITY_MAX || 6805 a->dtat_class > DTRACE_CLASS_MAX); 6806 } 6807 6808 /* 6809 * Return a duplicate copy of a string. If the specified string is NULL, 6810 * this function returns a zero-length string. 6811 */ 6812 static char * 6813 dtrace_strdup(const char *str) 6814 { 6815 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6816 6817 if (str != NULL) 6818 (void) strcpy(new, str); 6819 6820 return (new); 6821 } 6822 6823 #define DTRACE_ISALPHA(c) \ 6824 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6825 6826 static int 6827 dtrace_badname(const char *s) 6828 { 6829 char c; 6830 6831 if (s == NULL || (c = *s++) == '\0') 6832 return (0); 6833 6834 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6835 return (1); 6836 6837 while ((c = *s++) != '\0') { 6838 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6839 c != '-' && c != '_' && c != '.' && c != '`') 6840 return (1); 6841 } 6842 6843 return (0); 6844 } 6845 6846 static void 6847 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6848 { 6849 uint32_t priv; 6850 6851 #if defined(sun) 6852 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6853 /* 6854 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
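* Otherwise, each privilege held by the credential maps onto a
* DTRACE_PRIV_* flag below; e.g., a credential holding only
* PRIV_DTRACE_PROC and PRIV_PROC_OWNER yields
* DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER, scoped to the uid and
* zoneid recorded here.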
6855 */ 6856 priv = DTRACE_PRIV_ALL; 6857 } else { 6858 *uidp = crgetuid(cr); 6859 *zoneidp = crgetzoneid(cr); 6860 6861 priv = 0; 6862 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6863 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6864 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6865 priv |= DTRACE_PRIV_USER; 6866 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6867 priv |= DTRACE_PRIV_PROC; 6868 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6869 priv |= DTRACE_PRIV_OWNER; 6870 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6871 priv |= DTRACE_PRIV_ZONEOWNER; 6872 } 6873 #else 6874 priv = DTRACE_PRIV_ALL; 6875 #endif 6876 6877 *privp = priv; 6878 } 6879 6880 #ifdef DTRACE_ERRDEBUG 6881 static void 6882 dtrace_errdebug(const char *str) 6883 { 6884 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6885 int occupied = 0; 6886 6887 mutex_enter(&dtrace_errlock); 6888 dtrace_errlast = str; 6889 dtrace_errthread = curthread; 6890 6891 while (occupied++ < DTRACE_ERRHASHSZ) { 6892 if (dtrace_errhash[hval].dter_msg == str) { 6893 dtrace_errhash[hval].dter_count++; 6894 goto out; 6895 } 6896 6897 if (dtrace_errhash[hval].dter_msg != NULL) { 6898 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6899 continue; 6900 } 6901 6902 dtrace_errhash[hval].dter_msg = str; 6903 dtrace_errhash[hval].dter_count = 1; 6904 goto out; 6905 } 6906 6907 panic("dtrace: undersized error hash"); 6908 out: 6909 mutex_exit(&dtrace_errlock); 6910 } 6911 #endif 6912 6913 /* 6914 * DTrace Matching Functions 6915 * 6916 * These functions are used to match groups of probes, given some elements of 6917 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6918 */ 6919 static int 6920 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6921 zoneid_t zoneid) 6922 { 6923 if (priv != DTRACE_PRIV_ALL) { 6924 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6925 uint32_t match = priv & ppriv; 6926 6927 /* 6928 * No PRIV_DTRACE_* privileges... 6929 */ 6930 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6931 DTRACE_PRIV_KERNEL)) == 0) 6932 return (0); 6933 6934 /* 6935 * No matching bits, but there were bits to match... 6936 */ 6937 if (match == 0 && ppriv != 0) 6938 return (0); 6939 6940 /* 6941 * Need to have permissions to the process, but don't... 6942 */ 6943 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6944 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6945 return (0); 6946 } 6947 6948 /* 6949 * Need to be in the same zone unless we possess the 6950 * privilege to examine all zones. 6951 */ 6952 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6953 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6954 return (0); 6955 } 6956 } 6957 6958 return (1); 6959 } 6960 6961 /* 6962 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6963 * consists of input pattern strings and an ops-vector to evaluate them. 6964 * This function returns >0 for match, 0 for no match, and <0 for error. 
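* The key's pattern functions are applied in provider, module,
* function, name order, with the first non-match short-circuiting
* the evaluation; probes belonging to a defunct provider never
* match at all.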
6965 */ 6966 static int 6967 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6968 uint32_t priv, uid_t uid, zoneid_t zoneid) 6969 { 6970 dtrace_provider_t *pvp = prp->dtpr_provider; 6971 int rv; 6972 6973 if (pvp->dtpv_defunct) 6974 return (0); 6975 6976 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6977 return (rv); 6978 6979 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6980 return (rv); 6981 6982 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6983 return (rv); 6984 6985 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6986 return (rv); 6987 6988 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6989 return (0); 6990 6991 return (rv); 6992 } 6993 6994 /* 6995 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6996 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6997 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6998 * In addition, all of the recursion cases except for '*' matching have been 6999 * unwound. For '*', we still implement recursive evaluation, but a depth 7000 * counter is maintained and matching is aborted if we recurse too deep. 7001 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7002 */ 7003 static int 7004 dtrace_match_glob(const char *s, const char *p, int depth) 7005 { 7006 const char *olds; 7007 char s1, c; 7008 int gs; 7009 7010 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7011 return (-1); 7012 7013 if (s == NULL) 7014 s = ""; /* treat NULL as empty string */ 7015 7016 top: 7017 olds = s; 7018 s1 = *s++; 7019 7020 if (p == NULL) 7021 return (0); 7022 7023 if ((c = *p++) == '\0') 7024 return (s1 == '\0'); 7025 7026 switch (c) { 7027 case '[': { 7028 int ok = 0, notflag = 0; 7029 char lc = '\0'; 7030 7031 if (s1 == '\0') 7032 return (0); 7033 7034 if (*p == '!') { 7035 notflag = 1; 7036 p++; 7037 } 7038 7039 if ((c = *p++) == '\0') 7040 return (0); 7041 7042 do { 7043 if (c == '-' && lc != '\0' && *p != ']') { 7044 if ((c = *p++) == '\0') 7045 return (0); 7046 if (c == '\\' && (c = *p++) == '\0') 7047 return (0); 7048 7049 if (notflag) { 7050 if (s1 < lc || s1 > c) 7051 ok++; 7052 else 7053 return (0); 7054 } else if (lc <= s1 && s1 <= c) 7055 ok++; 7056 7057 } else if (c == '\\' && (c = *p++) == '\0') 7058 return (0); 7059 7060 lc = c; /* save left-hand 'c' for next iteration */ 7061 7062 if (notflag) { 7063 if (s1 != c) 7064 ok++; 7065 else 7066 return (0); 7067 } else if (s1 == c) 7068 ok++; 7069 7070 if ((c = *p++) == '\0') 7071 return (0); 7072 7073 } while (c != ']'); 7074 7075 if (ok) 7076 goto top; 7077 7078 return (0); 7079 } 7080 7081 case '\\': 7082 if ((c = *p++) == '\0') 7083 return (0); 7084 /*FALLTHRU*/ 7085 7086 default: 7087 if (c != s1) 7088 return (0); 7089 /*FALLTHRU*/ 7090 7091 case '?': 7092 if (s1 != '\0') 7093 goto top; 7094 return (0); 7095 7096 case '*': 7097 while (*p == '*') 7098 p++; /* consecutive *'s are identical to a single one */ 7099 7100 if (*p == '\0') 7101 return (1); 7102 7103 for (s = olds; *s != '\0'; s++) { 7104 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7105 return (gs); 7106 } 7107 7108 return (0); 7109 } 7110 } 7111 7112 /*ARGSUSED*/ 7113 static int 7114 dtrace_match_string(const char *s, const char *p, int depth) 7115 { 7116 return (s != NULL && strcmp(s, p) == 0); 7117 } 7118 7119 /*ARGSUSED*/ 7120 static int 7121 dtrace_match_nul(const char *s, const char *p, int depth) 7122 { 7123 return (1); /* 
always match the empty pattern */ 7124 } 7125 7126 /*ARGSUSED*/ 7127 static int 7128 dtrace_match_nonzero(const char *s, const char *p, int depth) 7129 { 7130 return (s != NULL && s[0] != '\0'); 7131 } 7132 7133 static int 7134 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7135 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7136 { 7137 dtrace_probe_t template, *probe; 7138 dtrace_hash_t *hash = NULL; 7139 int len, best = INT_MAX, nmatched = 0; 7140 dtrace_id_t i; 7141 7142 ASSERT(MUTEX_HELD(&dtrace_lock)); 7143 7144 /* 7145 * If the probe ID is specified in the key, just lookup by ID and 7146 * invoke the match callback once if a matching probe is found. 7147 */ 7148 if (pkp->dtpk_id != DTRACE_IDNONE) { 7149 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7150 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7151 (void) (*matched)(probe, arg); 7152 nmatched++; 7153 } 7154 return (nmatched); 7155 } 7156 7157 template.dtpr_mod = (char *)pkp->dtpk_mod; 7158 template.dtpr_func = (char *)pkp->dtpk_func; 7159 template.dtpr_name = (char *)pkp->dtpk_name; 7160 7161 /* 7162 * We want to find the most distinct of the module name, function 7163 * name, and name. So for each one that is not a glob pattern or 7164 * empty string, we perform a lookup in the corresponding hash and 7165 * use the hash table with the fewest collisions to do our search. 7166 */ 7167 if (pkp->dtpk_mmatch == &dtrace_match_string && 7168 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7169 best = len; 7170 hash = dtrace_bymod; 7171 } 7172 7173 if (pkp->dtpk_fmatch == &dtrace_match_string && 7174 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7175 best = len; 7176 hash = dtrace_byfunc; 7177 } 7178 7179 if (pkp->dtpk_nmatch == &dtrace_match_string && 7180 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7181 best = len; 7182 hash = dtrace_byname; 7183 } 7184 7185 /* 7186 * If we did not select a hash table, iterate over every probe and 7187 * invoke our callback for each one that matches our input probe key. 7188 */ 7189 if (hash == NULL) { 7190 for (i = 0; i < dtrace_nprobes; i++) { 7191 if ((probe = dtrace_probes[i]) == NULL || 7192 dtrace_match_probe(probe, pkp, priv, uid, 7193 zoneid) <= 0) 7194 continue; 7195 7196 nmatched++; 7197 7198 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7199 break; 7200 } 7201 7202 return (nmatched); 7203 } 7204 7205 /* 7206 * If we selected a hash table, iterate over each probe of the same key 7207 * name and invoke the callback for every probe that matches the other 7208 * attributes of our input probe key. 7209 */ 7210 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7211 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7212 7213 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7214 continue; 7215 7216 nmatched++; 7217 7218 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7219 break; 7220 } 7221 7222 return (nmatched); 7223 } 7224 7225 /* 7226 * Return the function pointer dtrace_probecmp() should use to compare the 7227 * specified pattern with a string. For NULL or empty patterns, we select 7228 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7229 * For non-empty non-glob strings, we use dtrace_match_string(). 
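* For example, the pattern "syscall" selects dtrace_match_string(),
* "sys*" -- or any pattern containing '*', '?', '[' or '\' --
* selects dtrace_match_glob(), and an empty or NULL pattern selects
* dtrace_match_nul().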
7230 */ 7231 static dtrace_probekey_f * 7232 dtrace_probekey_func(const char *p) 7233 { 7234 char c; 7235 7236 if (p == NULL || *p == '\0') 7237 return (&dtrace_match_nul); 7238 7239 while ((c = *p++) != '\0') { 7240 if (c == '[' || c == '?' || c == '*' || c == '\\') 7241 return (&dtrace_match_glob); 7242 } 7243 7244 return (&dtrace_match_string); 7245 } 7246 7247 /* 7248 * Build a probe comparison key for use with dtrace_match_probe() from the 7249 * given probe description. By convention, a null key only matches anchored 7250 * probes: if each field is the empty string, reset dtpk_fmatch to 7251 * dtrace_match_nonzero(). 7252 */ 7253 static void 7254 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7255 { 7256 pkp->dtpk_prov = pdp->dtpd_provider; 7257 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7258 7259 pkp->dtpk_mod = pdp->dtpd_mod; 7260 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7261 7262 pkp->dtpk_func = pdp->dtpd_func; 7263 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7264 7265 pkp->dtpk_name = pdp->dtpd_name; 7266 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7267 7268 pkp->dtpk_id = pdp->dtpd_id; 7269 7270 if (pkp->dtpk_id == DTRACE_IDNONE && 7271 pkp->dtpk_pmatch == &dtrace_match_nul && 7272 pkp->dtpk_mmatch == &dtrace_match_nul && 7273 pkp->dtpk_fmatch == &dtrace_match_nul && 7274 pkp->dtpk_nmatch == &dtrace_match_nul) 7275 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7276 } 7277 7278 /* 7279 * DTrace Provider-to-Framework API Functions 7280 * 7281 * These functions implement much of the Provider-to-Framework API, as 7282 * described in <sys/dtrace.h>. The parts of the API not in this section are 7283 * the functions in the API for probe management (found below), and 7284 * dtrace_probe() itself (found above). 7285 */ 7286 7287 /* 7288 * Register the calling provider with the DTrace framework. This should 7289 * generally be called by DTrace providers in their attach(9E) entry point. 7290 */ 7291 int 7292 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7293 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7294 { 7295 dtrace_provider_t *provider; 7296 7297 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7298 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7299 "arguments", name ? 
name : "<NULL>"); 7300 return (EINVAL); 7301 } 7302 7303 if (name[0] == '\0' || dtrace_badname(name)) { 7304 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7305 "provider name", name); 7306 return (EINVAL); 7307 } 7308 7309 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7310 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7311 pops->dtps_destroy == NULL || 7312 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7313 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7314 "provider ops", name); 7315 return (EINVAL); 7316 } 7317 7318 if (dtrace_badattr(&pap->dtpa_provider) || 7319 dtrace_badattr(&pap->dtpa_mod) || 7320 dtrace_badattr(&pap->dtpa_func) || 7321 dtrace_badattr(&pap->dtpa_name) || 7322 dtrace_badattr(&pap->dtpa_args)) { 7323 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7324 "provider attributes", name); 7325 return (EINVAL); 7326 } 7327 7328 if (priv & ~DTRACE_PRIV_ALL) { 7329 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7330 "privilege attributes", name); 7331 return (EINVAL); 7332 } 7333 7334 if ((priv & DTRACE_PRIV_KERNEL) && 7335 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7336 pops->dtps_usermode == NULL) { 7337 cmn_err(CE_WARN, "failed to register provider '%s': need " 7338 "dtps_usermode() op for given privilege attributes", name); 7339 return (EINVAL); 7340 } 7341 7342 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7343 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7344 (void) strcpy(provider->dtpv_name, name); 7345 7346 provider->dtpv_attr = *pap; 7347 provider->dtpv_priv.dtpp_flags = priv; 7348 if (cr != NULL) { 7349 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7350 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7351 } 7352 provider->dtpv_pops = *pops; 7353 7354 if (pops->dtps_provide == NULL) { 7355 ASSERT(pops->dtps_provide_module != NULL); 7356 provider->dtpv_pops.dtps_provide = 7357 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7358 } 7359 7360 if (pops->dtps_provide_module == NULL) { 7361 ASSERT(pops->dtps_provide != NULL); 7362 provider->dtpv_pops.dtps_provide_module = 7363 (void (*)(void *, modctl_t *))dtrace_nullop; 7364 } 7365 7366 if (pops->dtps_suspend == NULL) { 7367 ASSERT(pops->dtps_resume == NULL); 7368 provider->dtpv_pops.dtps_suspend = 7369 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7370 provider->dtpv_pops.dtps_resume = 7371 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7372 } 7373 7374 provider->dtpv_arg = arg; 7375 *idp = (dtrace_provider_id_t)provider; 7376 7377 if (pops == &dtrace_provider_ops) { 7378 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7379 ASSERT(MUTEX_HELD(&dtrace_lock)); 7380 ASSERT(dtrace_anon.dta_enabling == NULL); 7381 7382 /* 7383 * We make sure that the DTrace provider is at the head of 7384 * the provider chain. 7385 */ 7386 provider->dtpv_next = dtrace_provider; 7387 dtrace_provider = provider; 7388 return (0); 7389 } 7390 7391 mutex_enter(&dtrace_provider_lock); 7392 mutex_enter(&dtrace_lock); 7393 7394 /* 7395 * If there is at least one provider registered, we'll add this 7396 * provider after the first provider. 
7397 */ 7398 if (dtrace_provider != NULL) { 7399 provider->dtpv_next = dtrace_provider->dtpv_next; 7400 dtrace_provider->dtpv_next = provider; 7401 } else { 7402 dtrace_provider = provider; 7403 } 7404 7405 if (dtrace_retained != NULL) { 7406 dtrace_enabling_provide(provider); 7407 7408 /* 7409 * Now we need to call dtrace_enabling_matchall() -- which 7410 * will acquire cpu_lock and dtrace_lock. We therefore need 7411 * to drop all of our locks before calling into it... 7412 */ 7413 mutex_exit(&dtrace_lock); 7414 mutex_exit(&dtrace_provider_lock); 7415 dtrace_enabling_matchall(); 7416 7417 return (0); 7418 } 7419 7420 mutex_exit(&dtrace_lock); 7421 mutex_exit(&dtrace_provider_lock); 7422 7423 return (0); 7424 } 7425 7426 /* 7427 * Unregister the specified provider from the DTrace framework. This should 7428 * generally be called by DTrace providers in their detach(9E) entry point. 7429 */ 7430 int 7431 dtrace_unregister(dtrace_provider_id_t id) 7432 { 7433 dtrace_provider_t *old = (dtrace_provider_t *)id; 7434 dtrace_provider_t *prev = NULL; 7435 int i, self = 0; 7436 dtrace_probe_t *probe, *first = NULL; 7437 7438 if (old->dtpv_pops.dtps_enable == 7439 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7440 /* 7441 * If DTrace itself is the provider, we're called with locks 7442 * already held. 7443 */ 7444 ASSERT(old == dtrace_provider); 7445 #if defined(sun) 7446 ASSERT(dtrace_devi != NULL); 7447 #endif 7448 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7449 ASSERT(MUTEX_HELD(&dtrace_lock)); 7450 self = 1; 7451 7452 if (dtrace_provider->dtpv_next != NULL) { 7453 /* 7454 * There's another provider here; return failure. 7455 */ 7456 return (EBUSY); 7457 } 7458 } else { 7459 mutex_enter(&dtrace_provider_lock); 7460 mutex_enter(&mod_lock); 7461 mutex_enter(&dtrace_lock); 7462 } 7463 7464 /* 7465 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7466 * probes, we refuse to let providers slither away, unless this 7467 * provider has already been explicitly invalidated. 7468 */ 7469 if (!old->dtpv_defunct && 7470 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7471 dtrace_anon.dta_state->dts_necbs > 0))) { 7472 if (!self) { 7473 mutex_exit(&dtrace_lock); 7474 mutex_exit(&mod_lock); 7475 mutex_exit(&dtrace_provider_lock); 7476 } 7477 return (EBUSY); 7478 } 7479 7480 /* 7481 * Attempt to destroy the probes associated with this provider. 7482 */ 7483 for (i = 0; i < dtrace_nprobes; i++) { 7484 if ((probe = dtrace_probes[i]) == NULL) 7485 continue; 7486 7487 if (probe->dtpr_provider != old) 7488 continue; 7489 7490 if (probe->dtpr_ecb == NULL) 7491 continue; 7492 7493 /* 7494 * We have at least one ECB; we can't remove this provider. 7495 */ 7496 if (!self) { 7497 mutex_exit(&dtrace_lock); 7498 mutex_exit(&mod_lock); 7499 mutex_exit(&dtrace_provider_lock); 7500 } 7501 return (EBUSY); 7502 } 7503 7504 /* 7505 * All of the probes for this provider are disabled; we can safely 7506 * remove all of them from their hash chains and from the probe array. 
7507 */ 7508 for (i = 0; i < dtrace_nprobes; i++) { 7509 if ((probe = dtrace_probes[i]) == NULL) 7510 continue; 7511 7512 if (probe->dtpr_provider != old) 7513 continue; 7514 7515 dtrace_probes[i] = NULL; 7516 7517 dtrace_hash_remove(dtrace_bymod, probe); 7518 dtrace_hash_remove(dtrace_byfunc, probe); 7519 dtrace_hash_remove(dtrace_byname, probe); 7520 7521 if (first == NULL) { 7522 first = probe; 7523 probe->dtpr_nextmod = NULL; 7524 } else { 7525 probe->dtpr_nextmod = first; 7526 first = probe; 7527 } 7528 } 7529 7530 /* 7531 * The provider's probes have been removed from the hash chains and 7532 * from the probe array. Now issue a dtrace_sync() to be sure that 7533 * everyone has cleared out from any probe array processing. 7534 */ 7535 dtrace_sync(); 7536 7537 for (probe = first; probe != NULL; probe = first) { 7538 first = probe->dtpr_nextmod; 7539 7540 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7541 probe->dtpr_arg); 7542 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7543 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7544 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7545 #if defined(sun) 7546 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7547 #else 7548 free_unr(dtrace_arena, probe->dtpr_id); 7549 #endif 7550 kmem_free(probe, sizeof (dtrace_probe_t)); 7551 } 7552 7553 if ((prev = dtrace_provider) == old) { 7554 #if defined(sun) 7555 ASSERT(self || dtrace_devi == NULL); 7556 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7557 #endif 7558 dtrace_provider = old->dtpv_next; 7559 } else { 7560 while (prev != NULL && prev->dtpv_next != old) 7561 prev = prev->dtpv_next; 7562 7563 if (prev == NULL) { 7564 panic("attempt to unregister non-existent " 7565 "dtrace provider %p\n", (void *)id); 7566 } 7567 7568 prev->dtpv_next = old->dtpv_next; 7569 } 7570 7571 if (!self) { 7572 mutex_exit(&dtrace_lock); 7573 mutex_exit(&mod_lock); 7574 mutex_exit(&dtrace_provider_lock); 7575 } 7576 7577 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7578 kmem_free(old, sizeof (dtrace_provider_t)); 7579 7580 return (0); 7581 } 7582 7583 /* 7584 * Invalidate the specified provider. All subsequent probe lookups for the 7585 * specified provider will fail, but its probes will not be removed. 7586 */ 7587 void 7588 dtrace_invalidate(dtrace_provider_id_t id) 7589 { 7590 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7591 7592 ASSERT(pvp->dtpv_pops.dtps_enable != 7593 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7594 7595 mutex_enter(&dtrace_provider_lock); 7596 mutex_enter(&dtrace_lock); 7597 7598 pvp->dtpv_defunct = 1; 7599 7600 mutex_exit(&dtrace_lock); 7601 mutex_exit(&dtrace_provider_lock); 7602 } 7603 7604 /* 7605 * Indicate whether or not DTrace has attached. 7606 */ 7607 int 7608 dtrace_attached(void) 7609 { 7610 /* 7611 * dtrace_provider will be non-NULL iff the DTrace driver has 7612 * attached. (It's non-NULL because DTrace is always itself a 7613 * provider.) 7614 */ 7615 return (dtrace_provider != NULL); 7616 } 7617 7618 /* 7619 * Remove all the unenabled probes for the given provider. This function is 7620 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7621 * -- just as many of its associated probes as it can. 7622 */ 7623 int 7624 dtrace_condense(dtrace_provider_id_t id) 7625 { 7626 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7627 int i; 7628 dtrace_probe_t *probe; 7629 7630 /* 7631 * Make sure this isn't the dtrace provider itself. 
7632 */ 7633 ASSERT(prov->dtpv_pops.dtps_enable != 7634 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7635 7636 mutex_enter(&dtrace_provider_lock); 7637 mutex_enter(&dtrace_lock); 7638 7639 /* 7640 * Attempt to destroy the probes associated with this provider. 7641 */ 7642 for (i = 0; i < dtrace_nprobes; i++) { 7643 if ((probe = dtrace_probes[i]) == NULL) 7644 continue; 7645 7646 if (probe->dtpr_provider != prov) 7647 continue; 7648 7649 if (probe->dtpr_ecb != NULL) 7650 continue; 7651 7652 dtrace_probes[i] = NULL; 7653 7654 dtrace_hash_remove(dtrace_bymod, probe); 7655 dtrace_hash_remove(dtrace_byfunc, probe); 7656 dtrace_hash_remove(dtrace_byname, probe); 7657 7658 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7659 probe->dtpr_arg); 7660 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7661 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7662 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7663 kmem_free(probe, sizeof (dtrace_probe_t)); 7664 #if defined(sun) 7665 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7666 #else 7667 free_unr(dtrace_arena, i + 1); 7668 #endif 7669 } 7670 7671 mutex_exit(&dtrace_lock); 7672 mutex_exit(&dtrace_provider_lock); 7673 7674 return (0); 7675 } 7676 7677 /* 7678 * DTrace Probe Management Functions 7679 * 7680 * The functions in this section perform the DTrace probe management, 7681 * including functions to create probes, look-up probes, and call into the 7682 * providers to request that probes be provided. Some of these functions are 7683 * in the Provider-to-Framework API; these functions can be identified by the 7684 * fact that they are not declared "static". 7685 */ 7686 7687 /* 7688 * Create a probe with the specified module name, function name, and name. 7689 */ 7690 dtrace_id_t 7691 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7692 const char *func, const char *name, int aframes, void *arg) 7693 { 7694 dtrace_probe_t *probe, **probes; 7695 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7696 dtrace_id_t id; 7697 7698 if (provider == dtrace_provider) { 7699 ASSERT(MUTEX_HELD(&dtrace_lock)); 7700 } else { 7701 mutex_enter(&dtrace_lock); 7702 } 7703 7704 #if defined(sun) 7705 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7706 VM_BESTFIT | VM_SLEEP); 7707 #else 7708 id = alloc_unr(dtrace_arena); 7709 #endif 7710 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7711 7712 probe->dtpr_id = id; 7713 probe->dtpr_gen = dtrace_probegen++; 7714 probe->dtpr_mod = dtrace_strdup(mod); 7715 probe->dtpr_func = dtrace_strdup(func); 7716 probe->dtpr_name = dtrace_strdup(name); 7717 probe->dtpr_arg = arg; 7718 probe->dtpr_aframes = aframes; 7719 probe->dtpr_provider = provider; 7720 7721 dtrace_hash_add(dtrace_bymod, probe); 7722 dtrace_hash_add(dtrace_byfunc, probe); 7723 dtrace_hash_add(dtrace_byname, probe); 7724 7725 if (id - 1 >= dtrace_nprobes) { 7726 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7727 size_t nsize = osize << 1; 7728 7729 if (nsize == 0) { 7730 ASSERT(osize == 0); 7731 ASSERT(dtrace_probes == NULL); 7732 nsize = sizeof (dtrace_probe_t *); 7733 } 7734 7735 probes = kmem_zalloc(nsize, KM_SLEEP); 7736 7737 if (dtrace_probes == NULL) { 7738 ASSERT(osize == 0); 7739 dtrace_probes = probes; 7740 dtrace_nprobes = 1; 7741 } else { 7742 dtrace_probe_t **oprobes = dtrace_probes; 7743 7744 bcopy(oprobes, probes, osize); 7745 dtrace_membar_producer(); 7746 dtrace_probes = probes; 7747 7748 dtrace_sync(); 7749 7750 /* 7751 * All CPUs are now seeing 
the new probes array; we can 7752 * safely free the old array. 7753 */ 7754 kmem_free(oprobes, osize); 7755 dtrace_nprobes <<= 1; 7756 } 7757 7758 ASSERT(id - 1 < dtrace_nprobes); 7759 } 7760 7761 ASSERT(dtrace_probes[id - 1] == NULL); 7762 dtrace_probes[id - 1] = probe; 7763 7764 if (provider != dtrace_provider) 7765 mutex_exit(&dtrace_lock); 7766 7767 return (id); 7768 } 7769 7770 static dtrace_probe_t * 7771 dtrace_probe_lookup_id(dtrace_id_t id) 7772 { 7773 ASSERT(MUTEX_HELD(&dtrace_lock)); 7774 7775 if (id == 0 || id > dtrace_nprobes) 7776 return (NULL); 7777 7778 return (dtrace_probes[id - 1]); 7779 } 7780 7781 static int 7782 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7783 { 7784 *((dtrace_id_t *)arg) = probe->dtpr_id; 7785 7786 return (DTRACE_MATCH_DONE); 7787 } 7788 7789 /* 7790 * Look up a probe based on provider and one or more of module name, function 7791 * name and probe name. 7792 */ 7793 dtrace_id_t 7794 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7795 char *func, char *name) 7796 { 7797 dtrace_probekey_t pkey; 7798 dtrace_id_t id; 7799 int match; 7800 7801 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7802 pkey.dtpk_pmatch = &dtrace_match_string; 7803 pkey.dtpk_mod = mod; 7804 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7805 pkey.dtpk_func = func; 7806 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7807 pkey.dtpk_name = name; 7808 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7809 pkey.dtpk_id = DTRACE_IDNONE; 7810 7811 mutex_enter(&dtrace_lock); 7812 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7813 dtrace_probe_lookup_match, &id); 7814 mutex_exit(&dtrace_lock); 7815 7816 ASSERT(match == 1 || match == 0); 7817 return (match ? id : 0); 7818 } 7819 7820 /* 7821 * Returns the probe argument associated with the specified probe. 7822 */ 7823 void * 7824 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7825 { 7826 dtrace_probe_t *probe; 7827 void *rval = NULL; 7828 7829 mutex_enter(&dtrace_lock); 7830 7831 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7832 probe->dtpr_provider == (dtrace_provider_t *)id) 7833 rval = probe->dtpr_arg; 7834 7835 mutex_exit(&dtrace_lock); 7836 7837 return (rval); 7838 } 7839 7840 /* 7841 * Copy a probe into a probe description. 7842 */ 7843 static void 7844 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7845 { 7846 bzero(pdp, sizeof (dtrace_probedesc_t)); 7847 pdp->dtpd_id = prp->dtpr_id; 7848 7849 (void) strncpy(pdp->dtpd_provider, 7850 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7851 7852 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7853 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7854 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7855 } 7856 7857 #if !defined(sun) 7858 static int 7859 dtrace_probe_provide_cb(linker_file_t lf, void *arg) 7860 { 7861 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 7862 7863 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 7864 7865 return (0); 7866 } 7867 #endif 7868 7869 7870 /* 7871 * Called to indicate that a probe -- or probes -- should be provided by a 7872 * specified provider. If the specified description is NULL, the provider will 7873 * be told to provide all of its probes. (This is done whenever a new 7874 * consumer comes along, or whenever a retained enabling is to be matched.)
If 7875 * the specified description is non-NULL, the provider is given the 7876 * opportunity to dynamically provide the specified probe, allowing providers 7877 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7878 * probes.) If the provider is NULL, the operations will be applied to all 7879 * providers; if the provider is non-NULL the operations will only be applied 7880 * to the specified provider. The dtrace_provider_lock must be held, and the 7881 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7882 * will need to grab the dtrace_lock when it reenters the framework through 7883 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7884 */ 7885 static void 7886 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7887 { 7888 #if defined(sun) 7889 modctl_t *ctl; 7890 #endif 7891 int all = 0; 7892 7893 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7894 7895 if (prv == NULL) { 7896 all = 1; 7897 prv = dtrace_provider; 7898 } 7899 7900 do { 7901 /* 7902 * First, call the blanket provide operation. 7903 */ 7904 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7905 7906 /* 7907 * Now call the per-module provide operation. We will grab 7908 * mod_lock to prevent the list from being modified. Note 7909 * that this also prevents the mod_busy bits from changing. 7910 * (mod_busy can only be changed with mod_lock held.) 7911 */ 7912 mutex_enter(&mod_lock); 7913 7914 #if defined(sun) 7915 ctl = &modules; 7916 do { 7917 if (ctl->mod_busy || ctl->mod_mp == NULL) 7918 continue; 7919 7920 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7921 7922 } while ((ctl = ctl->mod_next) != &modules); 7923 #else 7924 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 7925 #endif 7926 7927 mutex_exit(&mod_lock); 7928 } while (all && (prv = prv->dtpv_next) != NULL); 7929 } 7930 7931 #if defined(sun) 7932 /* 7933 * Iterate over each probe, and call the Framework-to-Provider API function 7934 * denoted by offs. 7935 */ 7936 static void 7937 dtrace_probe_foreach(uintptr_t offs) 7938 { 7939 dtrace_provider_t *prov; 7940 void (*func)(void *, dtrace_id_t, void *); 7941 dtrace_probe_t *probe; 7942 dtrace_icookie_t cookie; 7943 int i; 7944 7945 /* 7946 * We disable interrupts to walk through the probe array. This is 7947 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7948 * won't see stale data. 7949 */ 7950 cookie = dtrace_interrupt_disable(); 7951 7952 for (i = 0; i < dtrace_nprobes; i++) { 7953 if ((probe = dtrace_probes[i]) == NULL) 7954 continue; 7955 7956 if (probe->dtpr_ecb == NULL) { 7957 /* 7958 * This probe isn't enabled -- don't call the function. 7959 */ 7960 continue; 7961 } 7962 7963 prov = probe->dtpr_provider; 7964 func = *((void(**)(void *, dtrace_id_t, void *)) 7965 ((uintptr_t)&prov->dtpv_pops + offs)); 7966 7967 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7968 } 7969 7970 dtrace_interrupt_enable(cookie); 7971 } 7972 #endif 7973 7974 static int 7975 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7976 { 7977 dtrace_probekey_t pkey; 7978 uint32_t priv; 7979 uid_t uid; 7980 zoneid_t zoneid; 7981 7982 ASSERT(MUTEX_HELD(&dtrace_lock)); 7983 dtrace_ecb_create_cache = NULL; 7984 7985 if (desc == NULL) { 7986 /* 7987 * If we're passed a NULL description, we're being asked to 7988 * create an ECB with a NULL probe. 
7989 */ 7990 (void) dtrace_ecb_create_enable(NULL, enab); 7991 return (0); 7992 } 7993 7994 dtrace_probekey(desc, &pkey); 7995 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7996 &priv, &uid, &zoneid); 7997 7998 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7999 enab)); 8000 } 8001 8002 /* 8003 * DTrace Helper Provider Functions 8004 */ 8005 static void 8006 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8007 { 8008 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8009 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8010 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8011 } 8012 8013 static void 8014 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8015 const dof_provider_t *dofprov, char *strtab) 8016 { 8017 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8018 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8019 dofprov->dofpv_provattr); 8020 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8021 dofprov->dofpv_modattr); 8022 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8023 dofprov->dofpv_funcattr); 8024 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8025 dofprov->dofpv_nameattr); 8026 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8027 dofprov->dofpv_argsattr); 8028 } 8029 8030 static void 8031 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8032 { 8033 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8034 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8035 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8036 dof_provider_t *provider; 8037 dof_probe_t *probe; 8038 uint32_t *off, *enoff; 8039 uint8_t *arg; 8040 char *strtab; 8041 uint_t i, nprobes; 8042 dtrace_helper_provdesc_t dhpv; 8043 dtrace_helper_probedesc_t dhpb; 8044 dtrace_meta_t *meta = dtrace_meta_pid; 8045 dtrace_mops_t *mops = &meta->dtm_mops; 8046 void *parg; 8047 8048 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8049 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8050 provider->dofpv_strtab * dof->dofh_secsize); 8051 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8052 provider->dofpv_probes * dof->dofh_secsize); 8053 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8054 provider->dofpv_prargs * dof->dofh_secsize); 8055 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8056 provider->dofpv_proffs * dof->dofh_secsize); 8057 8058 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8059 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8060 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8061 enoff = NULL; 8062 8063 /* 8064 * See dtrace_helper_provider_validate(). 8065 */ 8066 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8067 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8068 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8069 provider->dofpv_prenoffs * dof->dofh_secsize); 8070 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8071 } 8072 8073 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8074 8075 /* 8076 * Create the provider. 8077 */ 8078 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8079 8080 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8081 return; 8082 8083 meta->dtm_count++; 8084 8085 /* 8086 * Create the probes. 
8087 */ 8088 for (i = 0; i < nprobes; i++) { 8089 probe = (dof_probe_t *)(uintptr_t)(daddr + 8090 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8091 8092 dhpb.dthpb_mod = dhp->dofhp_mod; 8093 dhpb.dthpb_func = strtab + probe->dofpr_func; 8094 dhpb.dthpb_name = strtab + probe->dofpr_name; 8095 dhpb.dthpb_base = probe->dofpr_addr; 8096 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8097 dhpb.dthpb_noffs = probe->dofpr_noffs; 8098 if (enoff != NULL) { 8099 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8100 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8101 } else { 8102 dhpb.dthpb_enoffs = NULL; 8103 dhpb.dthpb_nenoffs = 0; 8104 } 8105 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8106 dhpb.dthpb_nargc = probe->dofpr_nargc; 8107 dhpb.dthpb_xargc = probe->dofpr_xargc; 8108 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8109 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8110 8111 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8112 } 8113 } 8114 8115 static void 8116 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8117 { 8118 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8119 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8120 int i; 8121 8122 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8123 8124 for (i = 0; i < dof->dofh_secnum; i++) { 8125 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8126 dof->dofh_secoff + i * dof->dofh_secsize); 8127 8128 if (sec->dofs_type != DOF_SECT_PROVIDER) 8129 continue; 8130 8131 dtrace_helper_provide_one(dhp, sec, pid); 8132 } 8133 8134 /* 8135 * We may have just created probes, so we must now rematch against 8136 * any retained enablings. Note that this call will acquire both 8137 * cpu_lock and dtrace_lock; the fact that we are holding 8138 * dtrace_meta_lock now is what defines the ordering with respect to 8139 * these three locks. 8140 */ 8141 dtrace_enabling_matchall(); 8142 } 8143 8144 #if defined(sun) 8145 static void 8146 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8147 { 8148 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8149 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8150 dof_sec_t *str_sec; 8151 dof_provider_t *provider; 8152 char *strtab; 8153 dtrace_helper_provdesc_t dhpv; 8154 dtrace_meta_t *meta = dtrace_meta_pid; 8155 dtrace_mops_t *mops = &meta->dtm_mops; 8156 8157 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8158 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8159 provider->dofpv_strtab * dof->dofh_secsize); 8160 8161 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8162 8163 /* 8164 * Create the provider. 8165 */ 8166 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8167 8168 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8169 8170 meta->dtm_count--; 8171 } 8172 8173 static void 8174 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8175 { 8176 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8177 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8178 int i; 8179 8180 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8181 8182 for (i = 0; i < dof->dofh_secnum; i++) { 8183 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8184 dof->dofh_secoff + i * dof->dofh_secsize); 8185 8186 if (sec->dofs_type != DOF_SECT_PROVIDER) 8187 continue; 8188 8189 dtrace_helper_provider_remove_one(dhp, sec, pid); 8190 } 8191 } 8192 #endif 8193 8194 /* 8195 * DTrace Meta Provider-to-Framework API Functions 8196 * 8197 * These functions implement the Meta Provider-to-Framework API, as described 8198 * in <sys/dtrace.h>. 
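 *
 * As a hedged illustration (a sketch, not code from this file): a
 * hypothetical user-land meta-provider "foo" would register itself
 * roughly as follows, assuming the dtrace_mops_t layout described in
 * <sys/dtrace.h>:
 *
 *	static dtrace_meta_provider_id_t foo_meta_id;
 *
 *	static const dtrace_mops_t foo_mops = {
 *		.dtms_create_probe = foo_create_probe,
 *		.dtms_provide_pid = foo_provide_pid,
 *		.dtms_remove_pid = foo_remove_pid
 *	};
 *
 *	error = dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id);
 *
 * Registration fails with EINVAL if the name or any of the three
 * operations is missing, or if a user-land meta-provider is already
 * registered; unregistration returns EBUSY while dtm_count (the count
 * of providers handed to the meta-provider) is non-zero.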
8199  */
8200 int
8201 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8202     dtrace_meta_provider_id_t *idp)
8203 {
8204 	dtrace_meta_t *meta;
8205 	dtrace_helpers_t *help, *next;
8206 	int i;
8207
8208 	*idp = DTRACE_METAPROVNONE;
8209
8210 	/*
8211 	 * We strictly don't need the name, but we hold onto it for
8212 	 * debuggability. All hail error queues!
8213 	 */
8214 	if (name == NULL) {
8215 		cmn_err(CE_WARN, "failed to register meta-provider: "
8216 		    "invalid name");
8217 		return (EINVAL);
8218 	}
8219
8220 	if (mops == NULL ||
8221 	    mops->dtms_create_probe == NULL ||
8222 	    mops->dtms_provide_pid == NULL ||
8223 	    mops->dtms_remove_pid == NULL) {
8224 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8225 		    "invalid ops", name);
8226 		return (EINVAL);
8227 	}
8228
8229 	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8230 	meta->dtm_mops = *mops;
8231 	meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8232 	(void) strcpy(meta->dtm_name, name);
8233 	meta->dtm_arg = arg;
8234
8235 	mutex_enter(&dtrace_meta_lock);
8236 	mutex_enter(&dtrace_lock);
8237
8238 	if (dtrace_meta_pid != NULL) {
8239 		mutex_exit(&dtrace_lock);
8240 		mutex_exit(&dtrace_meta_lock);
8241 		cmn_err(CE_WARN, "failed to register meta-provider %s: "
8242 		    "user-land meta-provider exists", name);
8243 		kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8244 		kmem_free(meta, sizeof (dtrace_meta_t));
8245 		return (EINVAL);
8246 	}
8247
8248 	dtrace_meta_pid = meta;
8249 	*idp = (dtrace_meta_provider_id_t)meta;
8250
8251 	/*
8252 	 * If there are providers and probes ready to go, pass them
8253 	 * off to the new meta provider now.
8254 	 */
8255
8256 	help = dtrace_deferred_pid;
8257 	dtrace_deferred_pid = NULL;
8258
8259 	mutex_exit(&dtrace_lock);
8260
8261 	while (help != NULL) {
8262 		for (i = 0; i < help->dthps_nprovs; i++) {
8263 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8264 			    help->dthps_pid);
8265 		}
8266
8267 		next = help->dthps_next;
8268 		help->dthps_next = NULL;
8269 		help->dthps_prev = NULL;
8270 		help->dthps_deferred = 0;
8271 		help = next;
8272 	}
8273
8274 	mutex_exit(&dtrace_meta_lock);
8275
8276 	return (0);
8277 }
8278
8279 int
8280 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8281 {
8282 	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8283
8284 	mutex_enter(&dtrace_meta_lock);
8285 	mutex_enter(&dtrace_lock);
8286
8287 	if (old == dtrace_meta_pid) {
8288 		pp = &dtrace_meta_pid;
8289 	} else {
8290 		panic("attempt to unregister non-existent "
8291 		    "dtrace meta-provider %p\n", (void *)old);
8292 	}
8293
8294 	if (old->dtm_count != 0) {
8295 		mutex_exit(&dtrace_lock);
8296 		mutex_exit(&dtrace_meta_lock);
8297 		return (EBUSY);
8298 	}
8299
8300 	*pp = NULL;
8301
8302 	mutex_exit(&dtrace_lock);
8303 	mutex_exit(&dtrace_meta_lock);
8304
8305 	kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8306 	kmem_free(old, sizeof (dtrace_meta_t));
8307
8308 	return (0);
8309 }
8310
8311
8312 /*
8313  * DTrace DIF Object Functions
8314  */
8315 static int
8316 dtrace_difo_err(uint_t pc, const char *format, ...)
8317 {
8318 	if (dtrace_err_verbose) {
8319 		va_list alist;
8320
8321 		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
8322 		va_start(alist, format);
8323 		(void) vuprintf(format, alist);
8324 		va_end(alist);
8325 	}
8326
8327 #ifdef DTRACE_ERRDEBUG
8328 	dtrace_errdebug(format);
8329 #endif
8330 	return (1);
8331 }
8332
8333 /*
8334  * Validate a DTrace DIF object by checking the IR instructions. The following
8335  * rules are currently enforced by dtrace_difo_validate():
8336  *
8337  * 1.
Each instruction must have a valid opcode 8338 * 2. Each register, string, variable, or subroutine reference must be valid 8339 * 3. No instruction can modify register %r0 (must be zero) 8340 * 4. All instruction reserved bits must be set to zero 8341 * 5. The last instruction must be a "ret" instruction 8342 * 6. All branch targets must reference a valid instruction _after_ the branch 8343 */ 8344 static int 8345 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8346 cred_t *cr) 8347 { 8348 int err = 0, i; 8349 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8350 int kcheckload; 8351 uint_t pc; 8352 8353 kcheckload = cr == NULL || 8354 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8355 8356 dp->dtdo_destructive = 0; 8357 8358 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8359 dif_instr_t instr = dp->dtdo_buf[pc]; 8360 8361 uint_t r1 = DIF_INSTR_R1(instr); 8362 uint_t r2 = DIF_INSTR_R2(instr); 8363 uint_t rd = DIF_INSTR_RD(instr); 8364 uint_t rs = DIF_INSTR_RS(instr); 8365 uint_t label = DIF_INSTR_LABEL(instr); 8366 uint_t v = DIF_INSTR_VAR(instr); 8367 uint_t subr = DIF_INSTR_SUBR(instr); 8368 uint_t type = DIF_INSTR_TYPE(instr); 8369 uint_t op = DIF_INSTR_OP(instr); 8370 8371 switch (op) { 8372 case DIF_OP_OR: 8373 case DIF_OP_XOR: 8374 case DIF_OP_AND: 8375 case DIF_OP_SLL: 8376 case DIF_OP_SRL: 8377 case DIF_OP_SRA: 8378 case DIF_OP_SUB: 8379 case DIF_OP_ADD: 8380 case DIF_OP_MUL: 8381 case DIF_OP_SDIV: 8382 case DIF_OP_UDIV: 8383 case DIF_OP_SREM: 8384 case DIF_OP_UREM: 8385 case DIF_OP_COPYS: 8386 if (r1 >= nregs) 8387 err += efunc(pc, "invalid register %u\n", r1); 8388 if (r2 >= nregs) 8389 err += efunc(pc, "invalid register %u\n", r2); 8390 if (rd >= nregs) 8391 err += efunc(pc, "invalid register %u\n", rd); 8392 if (rd == 0) 8393 err += efunc(pc, "cannot write to %r0\n"); 8394 break; 8395 case DIF_OP_NOT: 8396 case DIF_OP_MOV: 8397 case DIF_OP_ALLOCS: 8398 if (r1 >= nregs) 8399 err += efunc(pc, "invalid register %u\n", r1); 8400 if (r2 != 0) 8401 err += efunc(pc, "non-zero reserved bits\n"); 8402 if (rd >= nregs) 8403 err += efunc(pc, "invalid register %u\n", rd); 8404 if (rd == 0) 8405 err += efunc(pc, "cannot write to %r0\n"); 8406 break; 8407 case DIF_OP_LDSB: 8408 case DIF_OP_LDSH: 8409 case DIF_OP_LDSW: 8410 case DIF_OP_LDUB: 8411 case DIF_OP_LDUH: 8412 case DIF_OP_LDUW: 8413 case DIF_OP_LDX: 8414 if (r1 >= nregs) 8415 err += efunc(pc, "invalid register %u\n", r1); 8416 if (r2 != 0) 8417 err += efunc(pc, "non-zero reserved bits\n"); 8418 if (rd >= nregs) 8419 err += efunc(pc, "invalid register %u\n", rd); 8420 if (rd == 0) 8421 err += efunc(pc, "cannot write to %r0\n"); 8422 if (kcheckload) 8423 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8424 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8425 break; 8426 case DIF_OP_RLDSB: 8427 case DIF_OP_RLDSH: 8428 case DIF_OP_RLDSW: 8429 case DIF_OP_RLDUB: 8430 case DIF_OP_RLDUH: 8431 case DIF_OP_RLDUW: 8432 case DIF_OP_RLDX: 8433 if (r1 >= nregs) 8434 err += efunc(pc, "invalid register %u\n", r1); 8435 if (r2 != 0) 8436 err += efunc(pc, "non-zero reserved bits\n"); 8437 if (rd >= nregs) 8438 err += efunc(pc, "invalid register %u\n", rd); 8439 if (rd == 0) 8440 err += efunc(pc, "cannot write to %r0\n"); 8441 break; 8442 case DIF_OP_ULDSB: 8443 case DIF_OP_ULDSH: 8444 case DIF_OP_ULDSW: 8445 case DIF_OP_ULDUB: 8446 case DIF_OP_ULDUH: 8447 case DIF_OP_ULDUW: 8448 case DIF_OP_ULDX: 8449 if (r1 >= nregs) 8450 err += efunc(pc, "invalid register %u\n", r1); 8451 if (r2 != 0) 8452 err += 
efunc(pc, "non-zero reserved bits\n"); 8453 if (rd >= nregs) 8454 err += efunc(pc, "invalid register %u\n", rd); 8455 if (rd == 0) 8456 err += efunc(pc, "cannot write to %r0\n"); 8457 break; 8458 case DIF_OP_STB: 8459 case DIF_OP_STH: 8460 case DIF_OP_STW: 8461 case DIF_OP_STX: 8462 if (r1 >= nregs) 8463 err += efunc(pc, "invalid register %u\n", r1); 8464 if (r2 != 0) 8465 err += efunc(pc, "non-zero reserved bits\n"); 8466 if (rd >= nregs) 8467 err += efunc(pc, "invalid register %u\n", rd); 8468 if (rd == 0) 8469 err += efunc(pc, "cannot write to 0 address\n"); 8470 break; 8471 case DIF_OP_CMP: 8472 case DIF_OP_SCMP: 8473 if (r1 >= nregs) 8474 err += efunc(pc, "invalid register %u\n", r1); 8475 if (r2 >= nregs) 8476 err += efunc(pc, "invalid register %u\n", r2); 8477 if (rd != 0) 8478 err += efunc(pc, "non-zero reserved bits\n"); 8479 break; 8480 case DIF_OP_TST: 8481 if (r1 >= nregs) 8482 err += efunc(pc, "invalid register %u\n", r1); 8483 if (r2 != 0 || rd != 0) 8484 err += efunc(pc, "non-zero reserved bits\n"); 8485 break; 8486 case DIF_OP_BA: 8487 case DIF_OP_BE: 8488 case DIF_OP_BNE: 8489 case DIF_OP_BG: 8490 case DIF_OP_BGU: 8491 case DIF_OP_BGE: 8492 case DIF_OP_BGEU: 8493 case DIF_OP_BL: 8494 case DIF_OP_BLU: 8495 case DIF_OP_BLE: 8496 case DIF_OP_BLEU: 8497 if (label >= dp->dtdo_len) { 8498 err += efunc(pc, "invalid branch target %u\n", 8499 label); 8500 } 8501 if (label <= pc) { 8502 err += efunc(pc, "backward branch to %u\n", 8503 label); 8504 } 8505 break; 8506 case DIF_OP_RET: 8507 if (r1 != 0 || r2 != 0) 8508 err += efunc(pc, "non-zero reserved bits\n"); 8509 if (rd >= nregs) 8510 err += efunc(pc, "invalid register %u\n", rd); 8511 break; 8512 case DIF_OP_NOP: 8513 case DIF_OP_POPTS: 8514 case DIF_OP_FLUSHTS: 8515 if (r1 != 0 || r2 != 0 || rd != 0) 8516 err += efunc(pc, "non-zero reserved bits\n"); 8517 break; 8518 case DIF_OP_SETX: 8519 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8520 err += efunc(pc, "invalid integer ref %u\n", 8521 DIF_INSTR_INTEGER(instr)); 8522 } 8523 if (rd >= nregs) 8524 err += efunc(pc, "invalid register %u\n", rd); 8525 if (rd == 0) 8526 err += efunc(pc, "cannot write to %r0\n"); 8527 break; 8528 case DIF_OP_SETS: 8529 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8530 err += efunc(pc, "invalid string ref %u\n", 8531 DIF_INSTR_STRING(instr)); 8532 } 8533 if (rd >= nregs) 8534 err += efunc(pc, "invalid register %u\n", rd); 8535 if (rd == 0) 8536 err += efunc(pc, "cannot write to %r0\n"); 8537 break; 8538 case DIF_OP_LDGA: 8539 case DIF_OP_LDTA: 8540 if (r1 > DIF_VAR_ARRAY_MAX) 8541 err += efunc(pc, "invalid array %u\n", r1); 8542 if (r2 >= nregs) 8543 err += efunc(pc, "invalid register %u\n", r2); 8544 if (rd >= nregs) 8545 err += efunc(pc, "invalid register %u\n", rd); 8546 if (rd == 0) 8547 err += efunc(pc, "cannot write to %r0\n"); 8548 break; 8549 case DIF_OP_LDGS: 8550 case DIF_OP_LDTS: 8551 case DIF_OP_LDLS: 8552 case DIF_OP_LDGAA: 8553 case DIF_OP_LDTAA: 8554 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8555 err += efunc(pc, "invalid variable %u\n", v); 8556 if (rd >= nregs) 8557 err += efunc(pc, "invalid register %u\n", rd); 8558 if (rd == 0) 8559 err += efunc(pc, "cannot write to %r0\n"); 8560 break; 8561 case DIF_OP_STGS: 8562 case DIF_OP_STTS: 8563 case DIF_OP_STLS: 8564 case DIF_OP_STGAA: 8565 case DIF_OP_STTAA: 8566 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8567 err += efunc(pc, "invalid variable %u\n", v); 8568 if (rs >= nregs) 8569 err += efunc(pc, "invalid register %u\n", rd); 8570 break; 8571 case 
DIF_OP_CALL: 8572 if (subr > DIF_SUBR_MAX) 8573 err += efunc(pc, "invalid subr %u\n", subr); 8574 if (rd >= nregs) 8575 err += efunc(pc, "invalid register %u\n", rd); 8576 if (rd == 0) 8577 err += efunc(pc, "cannot write to %r0\n"); 8578 8579 if (subr == DIF_SUBR_COPYOUT || 8580 subr == DIF_SUBR_COPYOUTSTR) { 8581 dp->dtdo_destructive = 1; 8582 } 8583 break; 8584 case DIF_OP_PUSHTR: 8585 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8586 err += efunc(pc, "invalid ref type %u\n", type); 8587 if (r2 >= nregs) 8588 err += efunc(pc, "invalid register %u\n", r2); 8589 if (rs >= nregs) 8590 err += efunc(pc, "invalid register %u\n", rs); 8591 break; 8592 case DIF_OP_PUSHTV: 8593 if (type != DIF_TYPE_CTF) 8594 err += efunc(pc, "invalid val type %u\n", type); 8595 if (r2 >= nregs) 8596 err += efunc(pc, "invalid register %u\n", r2); 8597 if (rs >= nregs) 8598 err += efunc(pc, "invalid register %u\n", rs); 8599 break; 8600 default: 8601 err += efunc(pc, "invalid opcode %u\n", 8602 DIF_INSTR_OP(instr)); 8603 } 8604 } 8605 8606 if (dp->dtdo_len != 0 && 8607 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8608 err += efunc(dp->dtdo_len - 1, 8609 "expected 'ret' as last DIF instruction\n"); 8610 } 8611 8612 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8613 /* 8614 * If we're not returning by reference, the size must be either 8615 * 0 or the size of one of the base types. 8616 */ 8617 switch (dp->dtdo_rtype.dtdt_size) { 8618 case 0: 8619 case sizeof (uint8_t): 8620 case sizeof (uint16_t): 8621 case sizeof (uint32_t): 8622 case sizeof (uint64_t): 8623 break; 8624 8625 default: 8626 err += efunc(dp->dtdo_len - 1, "bad return size"); 8627 } 8628 } 8629 8630 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8631 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8632 dtrace_diftype_t *vt, *et; 8633 uint_t id, ndx; 8634 8635 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8636 v->dtdv_scope != DIFV_SCOPE_THREAD && 8637 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8638 err += efunc(i, "unrecognized variable scope %d\n", 8639 v->dtdv_scope); 8640 break; 8641 } 8642 8643 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8644 v->dtdv_kind != DIFV_KIND_SCALAR) { 8645 err += efunc(i, "unrecognized variable type %d\n", 8646 v->dtdv_kind); 8647 break; 8648 } 8649 8650 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8651 err += efunc(i, "%d exceeds variable id limit\n", id); 8652 break; 8653 } 8654 8655 if (id < DIF_VAR_OTHER_UBASE) 8656 continue; 8657 8658 /* 8659 * For user-defined variables, we need to check that this 8660 * definition is identical to any previous definition that we 8661 * encountered. 
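 *
 * For example (illustrative): if one clause in an enabling stores an
 * integer to a global x and a later clause stores a string to the same
 * x, the two DIFOs will disagree on the variable's type flags
 * (DIF_TF_BYREF) and size, and the checks below will reject the second
 * DIFO with "changed variable type flags" or "changed variable type
 * size".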
8662 		 */
8663 		ndx = id - DIF_VAR_OTHER_UBASE;
8664
8665 		switch (v->dtdv_scope) {
8666 		case DIFV_SCOPE_GLOBAL:
8667 			if (ndx < vstate->dtvs_nglobals) {
8668 				dtrace_statvar_t *svar;
8669
8670 				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8671 					existing = &svar->dtsv_var;
8672 			}
8673
8674 			break;
8675
8676 		case DIFV_SCOPE_THREAD:
8677 			if (ndx < vstate->dtvs_ntlocals)
8678 				existing = &vstate->dtvs_tlocals[ndx];
8679 			break;
8680
8681 		case DIFV_SCOPE_LOCAL:
8682 			if (ndx < vstate->dtvs_nlocals) {
8683 				dtrace_statvar_t *svar;
8684
8685 				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8686 					existing = &svar->dtsv_var;
8687 			}
8688
8689 			break;
8690 		}
8691
8692 		vt = &v->dtdv_type;
8693
8694 		if (vt->dtdt_flags & DIF_TF_BYREF) {
8695 			if (vt->dtdt_size == 0) {
8696 				err += efunc(i, "zero-sized variable\n");
8697 				break;
8698 			}
8699
8700 			if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8701 			    vt->dtdt_size > dtrace_global_maxsize) {
8702 				err += efunc(i, "oversized by-ref global\n");
8703 				break;
8704 			}
8705 		}
8706
8707 		if (existing == NULL || existing->dtdv_id == 0)
8708 			continue;
8709
8710 		ASSERT(existing->dtdv_id == v->dtdv_id);
8711 		ASSERT(existing->dtdv_scope == v->dtdv_scope);
8712
8713 		if (existing->dtdv_kind != v->dtdv_kind)
8714 			err += efunc(i, "%d changed variable kind\n", id);
8715
8716 		et = &existing->dtdv_type;
8717
8718 		if (vt->dtdt_flags != et->dtdt_flags) {
8719 			err += efunc(i, "%d changed variable type flags\n", id);
8720 			break;
8721 		}
8722
8723 		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8724 			err += efunc(i, "%d changed variable type size\n", id);
8725 			break;
8726 		}
8727 	}
8728
8729 	return (err);
8730 }
8731
8732 #if defined(sun)
8733 /*
8734  * Validate a DTrace DIF object that is to be used as a helper. Helpers
8735  * are much more constrained than normal DIFOs. Specifically, they may
8736  * not:
8737  *
8738  * 1. Make calls to subroutines other than copyin(), copyinstr() or
8739  *    miscellaneous string routines.
8740  * 2. Access DTrace variables other than the args[] array and the
8741  *    curthread, pid, ppid, tid, execargs, execname, zonename, uid and
8742  *    gid variables.
8743  * 3. Have thread-local variables.
8744  * 4. Have dynamic variables.
8745  */
8746 static int
8747 dtrace_difo_validate_helper(dtrace_difo_t *dp)
8748 {
	int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8749 int err = 0; 8750 uint_t pc; 8751 8752 for (pc = 0; pc < dp->dtdo_len; pc++) { 8753 dif_instr_t instr = dp->dtdo_buf[pc]; 8754 8755 uint_t v = DIF_INSTR_VAR(instr); 8756 uint_t subr = DIF_INSTR_SUBR(instr); 8757 uint_t op = DIF_INSTR_OP(instr); 8758 8759 switch (op) { 8760 case DIF_OP_OR: 8761 case DIF_OP_XOR: 8762 case DIF_OP_AND: 8763 case DIF_OP_SLL: 8764 case DIF_OP_SRL: 8765 case DIF_OP_SRA: 8766 case DIF_OP_SUB: 8767 case DIF_OP_ADD: 8768 case DIF_OP_MUL: 8769 case DIF_OP_SDIV: 8770 case DIF_OP_UDIV: 8771 case DIF_OP_SREM: 8772 case DIF_OP_UREM: 8773 case DIF_OP_COPYS: 8774 case DIF_OP_NOT: 8775 case DIF_OP_MOV: 8776 case DIF_OP_RLDSB: 8777 case DIF_OP_RLDSH: 8778 case DIF_OP_RLDSW: 8779 case DIF_OP_RLDUB: 8780 case DIF_OP_RLDUH: 8781 case DIF_OP_RLDUW: 8782 case DIF_OP_RLDX: 8783 case DIF_OP_ULDSB: 8784 case DIF_OP_ULDSH: 8785 case DIF_OP_ULDSW: 8786 case DIF_OP_ULDUB: 8787 case DIF_OP_ULDUH: 8788 case DIF_OP_ULDUW: 8789 case DIF_OP_ULDX: 8790 case DIF_OP_STB: 8791 case DIF_OP_STH: 8792 case DIF_OP_STW: 8793 case DIF_OP_STX: 8794 case DIF_OP_ALLOCS: 8795 case DIF_OP_CMP: 8796 case DIF_OP_SCMP: 8797 case DIF_OP_TST: 8798 case DIF_OP_BA: 8799 case DIF_OP_BE: 8800 case DIF_OP_BNE: 8801 case DIF_OP_BG: 8802 case DIF_OP_BGU: 8803 case DIF_OP_BGE: 8804 case DIF_OP_BGEU: 8805 case DIF_OP_BL: 8806 case DIF_OP_BLU: 8807 case DIF_OP_BLE: 8808 case DIF_OP_BLEU: 8809 case DIF_OP_RET: 8810 case DIF_OP_NOP: 8811 case DIF_OP_POPTS: 8812 case DIF_OP_FLUSHTS: 8813 case DIF_OP_SETX: 8814 case DIF_OP_SETS: 8815 case DIF_OP_LDGA: 8816 case DIF_OP_LDLS: 8817 case DIF_OP_STGS: 8818 case DIF_OP_STLS: 8819 case DIF_OP_PUSHTR: 8820 case DIF_OP_PUSHTV: 8821 break; 8822 8823 case DIF_OP_LDGS: 8824 if (v >= DIF_VAR_OTHER_UBASE) 8825 break; 8826 8827 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8828 break; 8829 8830 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8831 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8832 v == DIF_VAR_EXECARGS || 8833 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8834 v == DIF_VAR_UID || v == DIF_VAR_GID) 8835 break; 8836 8837 err += efunc(pc, "illegal variable %u\n", v); 8838 break; 8839 8840 case DIF_OP_LDTA: 8841 case DIF_OP_LDTS: 8842 case DIF_OP_LDGAA: 8843 case DIF_OP_LDTAA: 8844 err += efunc(pc, "illegal dynamic variable load\n"); 8845 break; 8846 8847 case DIF_OP_STTS: 8848 case DIF_OP_STGAA: 8849 case DIF_OP_STTAA: 8850 err += efunc(pc, "illegal dynamic variable store\n"); 8851 break; 8852 8853 case DIF_OP_CALL: 8854 if (subr == DIF_SUBR_ALLOCA || 8855 subr == DIF_SUBR_BCOPY || 8856 subr == DIF_SUBR_COPYIN || 8857 subr == DIF_SUBR_COPYINTO || 8858 subr == DIF_SUBR_COPYINSTR || 8859 subr == DIF_SUBR_INDEX || 8860 subr == DIF_SUBR_INET_NTOA || 8861 subr == DIF_SUBR_INET_NTOA6 || 8862 subr == DIF_SUBR_INET_NTOP || 8863 subr == DIF_SUBR_LLTOSTR || 8864 subr == DIF_SUBR_RINDEX || 8865 subr == DIF_SUBR_STRCHR || 8866 subr == DIF_SUBR_STRJOIN || 8867 subr == DIF_SUBR_STRRCHR || 8868 subr == DIF_SUBR_STRSTR || 8869 subr == DIF_SUBR_HTONS || 8870 subr == DIF_SUBR_HTONL || 8871 subr == DIF_SUBR_HTONLL || 8872 subr == DIF_SUBR_NTOHS || 8873 subr == DIF_SUBR_NTOHL || 8874 subr == DIF_SUBR_NTOHLL || 8875 subr == DIF_SUBR_MEMREF || 8876 subr == DIF_SUBR_TYPEREF) 8877 break; 8878 8879 err += efunc(pc, "invalid subr %u\n", subr); 8880 break; 8881 8882 default: 8883 err += efunc(pc, "invalid opcode %u\n", 8884 DIF_INSTR_OP(instr)); 8885 } 8886 } 8887 8888 return (err); 8889 } 8890 #endif 8891 8892 /* 8893 * Returns 1 if the expression in the DIF object can be cached 
on a per-thread 8894 * basis; 0 if not. 8895 */ 8896 static int 8897 dtrace_difo_cacheable(dtrace_difo_t *dp) 8898 { 8899 int i; 8900 8901 if (dp == NULL) 8902 return (0); 8903 8904 for (i = 0; i < dp->dtdo_varlen; i++) { 8905 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8906 8907 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8908 continue; 8909 8910 switch (v->dtdv_id) { 8911 case DIF_VAR_CURTHREAD: 8912 case DIF_VAR_PID: 8913 case DIF_VAR_TID: 8914 case DIF_VAR_EXECARGS: 8915 case DIF_VAR_EXECNAME: 8916 case DIF_VAR_ZONENAME: 8917 break; 8918 8919 default: 8920 return (0); 8921 } 8922 } 8923 8924 /* 8925 * This DIF object may be cacheable. Now we need to look for any 8926 * array loading instructions, any memory loading instructions, or 8927 * any stores to thread-local variables. 8928 */ 8929 for (i = 0; i < dp->dtdo_len; i++) { 8930 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8931 8932 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8933 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8934 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8935 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8936 return (0); 8937 } 8938 8939 return (1); 8940 } 8941 8942 static void 8943 dtrace_difo_hold(dtrace_difo_t *dp) 8944 { 8945 int i; 8946 8947 ASSERT(MUTEX_HELD(&dtrace_lock)); 8948 8949 dp->dtdo_refcnt++; 8950 ASSERT(dp->dtdo_refcnt != 0); 8951 8952 /* 8953 * We need to check this DIF object for references to the variable 8954 * DIF_VAR_VTIMESTAMP. 8955 */ 8956 for (i = 0; i < dp->dtdo_varlen; i++) { 8957 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8958 8959 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8960 continue; 8961 8962 if (dtrace_vtime_references++ == 0) 8963 dtrace_vtime_enable(); 8964 } 8965 } 8966 8967 /* 8968 * This routine calculates the dynamic variable chunksize for a given DIF 8969 * object. The calculation is not fool-proof, and can probably be tricked by 8970 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8971 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8972 * if a dynamic variable size exceeds the chunksize. 
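 *
 * As a hedged illustration: for a D assignment along the lines of
 * self->a[123, "hello"] = timestamp, the integer key is pushed by
 * value (contributing nothing to the key data), the string key is
 * sized at dtrace_strsize_default when no preceding setx pins its
 * size, and the DIF_OP_STTAA case adds the implicit thread and
 * identifier keys; the chunk must then cover the dtrace_dynvar_t
 * header, one dtrace_key_t per key beyond the first, the rounded-up
 * key data, and the value size found in the variable table.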
8973 */ 8974 static void 8975 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8976 { 8977 uint64_t sval = 0; 8978 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8979 const dif_instr_t *text = dp->dtdo_buf; 8980 uint_t pc, srd = 0; 8981 uint_t ttop = 0; 8982 size_t size, ksize; 8983 uint_t id, i; 8984 8985 for (pc = 0; pc < dp->dtdo_len; pc++) { 8986 dif_instr_t instr = text[pc]; 8987 uint_t op = DIF_INSTR_OP(instr); 8988 uint_t rd = DIF_INSTR_RD(instr); 8989 uint_t r1 = DIF_INSTR_R1(instr); 8990 uint_t nkeys = 0; 8991 uchar_t scope = 0; 8992 8993 dtrace_key_t *key = tupregs; 8994 8995 switch (op) { 8996 case DIF_OP_SETX: 8997 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8998 srd = rd; 8999 continue; 9000 9001 case DIF_OP_STTS: 9002 key = &tupregs[DIF_DTR_NREGS]; 9003 key[0].dttk_size = 0; 9004 key[1].dttk_size = 0; 9005 nkeys = 2; 9006 scope = DIFV_SCOPE_THREAD; 9007 break; 9008 9009 case DIF_OP_STGAA: 9010 case DIF_OP_STTAA: 9011 nkeys = ttop; 9012 9013 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9014 key[nkeys++].dttk_size = 0; 9015 9016 key[nkeys++].dttk_size = 0; 9017 9018 if (op == DIF_OP_STTAA) { 9019 scope = DIFV_SCOPE_THREAD; 9020 } else { 9021 scope = DIFV_SCOPE_GLOBAL; 9022 } 9023 9024 break; 9025 9026 case DIF_OP_PUSHTR: 9027 if (ttop == DIF_DTR_NREGS) 9028 return; 9029 9030 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9031 /* 9032 * If the register for the size of the "pushtr" 9033 * is %r0 (or the value is 0) and the type is 9034 * a string, we'll use the system-wide default 9035 * string size. 9036 */ 9037 tupregs[ttop++].dttk_size = 9038 dtrace_strsize_default; 9039 } else { 9040 if (srd == 0) 9041 return; 9042 9043 tupregs[ttop++].dttk_size = sval; 9044 } 9045 9046 break; 9047 9048 case DIF_OP_PUSHTV: 9049 if (ttop == DIF_DTR_NREGS) 9050 return; 9051 9052 tupregs[ttop++].dttk_size = 0; 9053 break; 9054 9055 case DIF_OP_FLUSHTS: 9056 ttop = 0; 9057 break; 9058 9059 case DIF_OP_POPTS: 9060 if (ttop != 0) 9061 ttop--; 9062 break; 9063 } 9064 9065 sval = 0; 9066 srd = 0; 9067 9068 if (nkeys == 0) 9069 continue; 9070 9071 /* 9072 * We have a dynamic variable allocation; calculate its size. 9073 */ 9074 for (ksize = 0, i = 0; i < nkeys; i++) 9075 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9076 9077 size = sizeof (dtrace_dynvar_t); 9078 size += sizeof (dtrace_key_t) * (nkeys - 1); 9079 size += ksize; 9080 9081 /* 9082 * Now we need to determine the size of the stored data. 9083 */ 9084 id = DIF_INSTR_VAR(instr); 9085 9086 for (i = 0; i < dp->dtdo_varlen; i++) { 9087 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9088 9089 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9090 size += v->dtdv_type.dtdt_size; 9091 break; 9092 } 9093 } 9094 9095 if (i == dp->dtdo_varlen) 9096 return; 9097 9098 /* 9099 * We have the size. If this is larger than the chunk size 9100 * for our dynamic variable state, reset the chunk size. 
9101 */ 9102 size = P2ROUNDUP(size, sizeof (uint64_t)); 9103 9104 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9105 vstate->dtvs_dynvars.dtds_chunksize = size; 9106 } 9107 } 9108 9109 static void 9110 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9111 { 9112 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9113 uint_t id; 9114 9115 ASSERT(MUTEX_HELD(&dtrace_lock)); 9116 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9117 9118 for (i = 0; i < dp->dtdo_varlen; i++) { 9119 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9120 dtrace_statvar_t *svar, ***svarp = NULL; 9121 size_t dsize = 0; 9122 uint8_t scope = v->dtdv_scope; 9123 int *np = NULL; 9124 9125 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9126 continue; 9127 9128 id -= DIF_VAR_OTHER_UBASE; 9129 9130 switch (scope) { 9131 case DIFV_SCOPE_THREAD: 9132 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9133 dtrace_difv_t *tlocals; 9134 9135 if ((ntlocals = (otlocals << 1)) == 0) 9136 ntlocals = 1; 9137 9138 osz = otlocals * sizeof (dtrace_difv_t); 9139 nsz = ntlocals * sizeof (dtrace_difv_t); 9140 9141 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9142 9143 if (osz != 0) { 9144 bcopy(vstate->dtvs_tlocals, 9145 tlocals, osz); 9146 kmem_free(vstate->dtvs_tlocals, osz); 9147 } 9148 9149 vstate->dtvs_tlocals = tlocals; 9150 vstate->dtvs_ntlocals = ntlocals; 9151 } 9152 9153 vstate->dtvs_tlocals[id] = *v; 9154 continue; 9155 9156 case DIFV_SCOPE_LOCAL: 9157 np = &vstate->dtvs_nlocals; 9158 svarp = &vstate->dtvs_locals; 9159 9160 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9161 dsize = NCPU * (v->dtdv_type.dtdt_size + 9162 sizeof (uint64_t)); 9163 else 9164 dsize = NCPU * sizeof (uint64_t); 9165 9166 break; 9167 9168 case DIFV_SCOPE_GLOBAL: 9169 np = &vstate->dtvs_nglobals; 9170 svarp = &vstate->dtvs_globals; 9171 9172 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9173 dsize = v->dtdv_type.dtdt_size + 9174 sizeof (uint64_t); 9175 9176 break; 9177 9178 default: 9179 ASSERT(0); 9180 } 9181 9182 while (id >= (oldsvars = *np)) { 9183 dtrace_statvar_t **statics; 9184 int newsvars, oldsize, newsize; 9185 9186 if ((newsvars = (oldsvars << 1)) == 0) 9187 newsvars = 1; 9188 9189 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9190 newsize = newsvars * sizeof (dtrace_statvar_t *); 9191 9192 statics = kmem_zalloc(newsize, KM_SLEEP); 9193 9194 if (oldsize != 0) { 9195 bcopy(*svarp, statics, oldsize); 9196 kmem_free(*svarp, oldsize); 9197 } 9198 9199 *svarp = statics; 9200 *np = newsvars; 9201 } 9202 9203 if ((svar = (*svarp)[id]) == NULL) { 9204 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9205 svar->dtsv_var = *v; 9206 9207 if ((svar->dtsv_size = dsize) != 0) { 9208 svar->dtsv_data = (uint64_t)(uintptr_t) 9209 kmem_zalloc(dsize, KM_SLEEP); 9210 } 9211 9212 (*svarp)[id] = svar; 9213 } 9214 9215 svar->dtsv_refcnt++; 9216 } 9217 9218 dtrace_difo_chunksize(dp, vstate); 9219 dtrace_difo_hold(dp); 9220 } 9221 9222 #if defined(sun) 9223 static dtrace_difo_t * 9224 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9225 { 9226 dtrace_difo_t *new; 9227 size_t sz; 9228 9229 ASSERT(dp->dtdo_buf != NULL); 9230 ASSERT(dp->dtdo_refcnt != 0); 9231 9232 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9233 9234 ASSERT(dp->dtdo_buf != NULL); 9235 sz = dp->dtdo_len * sizeof (dif_instr_t); 9236 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9237 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9238 new->dtdo_len = dp->dtdo_len; 9239 9240 if (dp->dtdo_strtab != NULL) { 9241 ASSERT(dp->dtdo_strlen != 0); 9242 new->dtdo_strtab = 
kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9243 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9244 new->dtdo_strlen = dp->dtdo_strlen; 9245 } 9246 9247 if (dp->dtdo_inttab != NULL) { 9248 ASSERT(dp->dtdo_intlen != 0); 9249 sz = dp->dtdo_intlen * sizeof (uint64_t); 9250 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9251 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9252 new->dtdo_intlen = dp->dtdo_intlen; 9253 } 9254 9255 if (dp->dtdo_vartab != NULL) { 9256 ASSERT(dp->dtdo_varlen != 0); 9257 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9258 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9259 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9260 new->dtdo_varlen = dp->dtdo_varlen; 9261 } 9262 9263 dtrace_difo_init(new, vstate); 9264 return (new); 9265 } 9266 #endif 9267 9268 static void 9269 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9270 { 9271 int i; 9272 9273 ASSERT(dp->dtdo_refcnt == 0); 9274 9275 for (i = 0; i < dp->dtdo_varlen; i++) { 9276 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9277 dtrace_statvar_t *svar, **svarp = NULL; 9278 uint_t id; 9279 uint8_t scope = v->dtdv_scope; 9280 int *np = NULL; 9281 9282 switch (scope) { 9283 case DIFV_SCOPE_THREAD: 9284 continue; 9285 9286 case DIFV_SCOPE_LOCAL: 9287 np = &vstate->dtvs_nlocals; 9288 svarp = vstate->dtvs_locals; 9289 break; 9290 9291 case DIFV_SCOPE_GLOBAL: 9292 np = &vstate->dtvs_nglobals; 9293 svarp = vstate->dtvs_globals; 9294 break; 9295 9296 default: 9297 ASSERT(0); 9298 } 9299 9300 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9301 continue; 9302 9303 id -= DIF_VAR_OTHER_UBASE; 9304 ASSERT(id < *np); 9305 9306 svar = svarp[id]; 9307 ASSERT(svar != NULL); 9308 ASSERT(svar->dtsv_refcnt > 0); 9309 9310 if (--svar->dtsv_refcnt > 0) 9311 continue; 9312 9313 if (svar->dtsv_size != 0) { 9314 ASSERT(svar->dtsv_data != 0); 9315 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9316 svar->dtsv_size); 9317 } 9318 9319 kmem_free(svar, sizeof (dtrace_statvar_t)); 9320 svarp[id] = NULL; 9321 } 9322 9323 if (dp->dtdo_buf != NULL) 9324 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9325 if (dp->dtdo_inttab != NULL) 9326 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9327 if (dp->dtdo_strtab != NULL) 9328 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9329 if (dp->dtdo_vartab != NULL) 9330 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9331 9332 kmem_free(dp, sizeof (dtrace_difo_t)); 9333 } 9334 9335 static void 9336 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9337 { 9338 int i; 9339 9340 ASSERT(MUTEX_HELD(&dtrace_lock)); 9341 ASSERT(dp->dtdo_refcnt != 0); 9342 9343 for (i = 0; i < dp->dtdo_varlen; i++) { 9344 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9345 9346 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9347 continue; 9348 9349 ASSERT(dtrace_vtime_references > 0); 9350 if (--dtrace_vtime_references == 0) 9351 dtrace_vtime_disable(); 9352 } 9353 9354 if (--dp->dtdo_refcnt == 0) 9355 dtrace_difo_destroy(dp, vstate); 9356 } 9357 9358 /* 9359 * DTrace Format Functions 9360 */ 9361 static uint16_t 9362 dtrace_format_add(dtrace_state_t *state, char *str) 9363 { 9364 char *fmt, **new; 9365 uint16_t ndx, len = strlen(str) + 1; 9366 9367 fmt = kmem_zalloc(len, KM_SLEEP); 9368 bcopy(str, fmt, len); 9369 9370 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9371 if (state->dts_formats[ndx] == NULL) { 9372 state->dts_formats[ndx] = fmt; 9373 return (ndx + 1); 9374 } 9375 } 9376 9377 if (state->dts_nformats == USHRT_MAX) { 9378 /* 9379 * This is only likely if a 
denial-of-service attack is being 9380 * attempted. As such, it's okay to fail silently here. 9381 */ 9382 kmem_free(fmt, len); 9383 return (0); 9384 } 9385 9386 /* 9387 * For simplicity, we always resize the formats array to be exactly the 9388 * number of formats. 9389 */ 9390 ndx = state->dts_nformats++; 9391 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9392 9393 if (state->dts_formats != NULL) { 9394 ASSERT(ndx != 0); 9395 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9396 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9397 } 9398 9399 state->dts_formats = new; 9400 state->dts_formats[ndx] = fmt; 9401 9402 return (ndx + 1); 9403 } 9404 9405 static void 9406 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9407 { 9408 char *fmt; 9409 9410 ASSERT(state->dts_formats != NULL); 9411 ASSERT(format <= state->dts_nformats); 9412 ASSERT(state->dts_formats[format - 1] != NULL); 9413 9414 fmt = state->dts_formats[format - 1]; 9415 kmem_free(fmt, strlen(fmt) + 1); 9416 state->dts_formats[format - 1] = NULL; 9417 } 9418 9419 static void 9420 dtrace_format_destroy(dtrace_state_t *state) 9421 { 9422 int i; 9423 9424 if (state->dts_nformats == 0) { 9425 ASSERT(state->dts_formats == NULL); 9426 return; 9427 } 9428 9429 ASSERT(state->dts_formats != NULL); 9430 9431 for (i = 0; i < state->dts_nformats; i++) { 9432 char *fmt = state->dts_formats[i]; 9433 9434 if (fmt == NULL) 9435 continue; 9436 9437 kmem_free(fmt, strlen(fmt) + 1); 9438 } 9439 9440 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9441 state->dts_nformats = 0; 9442 state->dts_formats = NULL; 9443 } 9444 9445 /* 9446 * DTrace Predicate Functions 9447 */ 9448 static dtrace_predicate_t * 9449 dtrace_predicate_create(dtrace_difo_t *dp) 9450 { 9451 dtrace_predicate_t *pred; 9452 9453 ASSERT(MUTEX_HELD(&dtrace_lock)); 9454 ASSERT(dp->dtdo_refcnt != 0); 9455 9456 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9457 pred->dtp_difo = dp; 9458 pred->dtp_refcnt = 1; 9459 9460 if (!dtrace_difo_cacheable(dp)) 9461 return (pred); 9462 9463 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9464 /* 9465 * This is only theoretically possible -- we have had 2^32 9466 * cacheable predicates on this machine. We cannot allow any 9467 * more predicates to become cacheable: as unlikely as it is, 9468 * there may be a thread caching a (now stale) predicate cache 9469 * ID. 
(N.B.: the temptation is being successfully resisted to 9470 * have this cmn_err() "Holy shit -- we executed this code!") 9471 */ 9472 return (pred); 9473 } 9474 9475 pred->dtp_cacheid = dtrace_predcache_id++; 9476 9477 return (pred); 9478 } 9479 9480 static void 9481 dtrace_predicate_hold(dtrace_predicate_t *pred) 9482 { 9483 ASSERT(MUTEX_HELD(&dtrace_lock)); 9484 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9485 ASSERT(pred->dtp_refcnt > 0); 9486 9487 pred->dtp_refcnt++; 9488 } 9489 9490 static void 9491 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9492 { 9493 dtrace_difo_t *dp = pred->dtp_difo; 9494 9495 ASSERT(MUTEX_HELD(&dtrace_lock)); 9496 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9497 ASSERT(pred->dtp_refcnt > 0); 9498 9499 if (--pred->dtp_refcnt == 0) { 9500 dtrace_difo_release(pred->dtp_difo, vstate); 9501 kmem_free(pred, sizeof (dtrace_predicate_t)); 9502 } 9503 } 9504 9505 /* 9506 * DTrace Action Description Functions 9507 */ 9508 static dtrace_actdesc_t * 9509 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9510 uint64_t uarg, uint64_t arg) 9511 { 9512 dtrace_actdesc_t *act; 9513 9514 #if defined(sun) 9515 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9516 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9517 #endif 9518 9519 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9520 act->dtad_kind = kind; 9521 act->dtad_ntuple = ntuple; 9522 act->dtad_uarg = uarg; 9523 act->dtad_arg = arg; 9524 act->dtad_refcnt = 1; 9525 9526 return (act); 9527 } 9528 9529 static void 9530 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9531 { 9532 ASSERT(act->dtad_refcnt >= 1); 9533 act->dtad_refcnt++; 9534 } 9535 9536 static void 9537 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9538 { 9539 dtrace_actkind_t kind = act->dtad_kind; 9540 dtrace_difo_t *dp; 9541 9542 ASSERT(act->dtad_refcnt >= 1); 9543 9544 if (--act->dtad_refcnt != 0) 9545 return; 9546 9547 if ((dp = act->dtad_difo) != NULL) 9548 dtrace_difo_release(dp, vstate); 9549 9550 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9551 char *str = (char *)(uintptr_t)act->dtad_arg; 9552 9553 #if defined(sun) 9554 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9555 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9556 #endif 9557 9558 if (str != NULL) 9559 kmem_free(str, strlen(str) + 1); 9560 } 9561 9562 kmem_free(act, sizeof (dtrace_actdesc_t)); 9563 } 9564 9565 /* 9566 * DTrace ECB Functions 9567 */ 9568 static dtrace_ecb_t * 9569 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9570 { 9571 dtrace_ecb_t *ecb; 9572 dtrace_epid_t epid; 9573 9574 ASSERT(MUTEX_HELD(&dtrace_lock)); 9575 9576 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9577 ecb->dte_predicate = NULL; 9578 ecb->dte_probe = probe; 9579 9580 /* 9581 * The default size is the size of the default action: recording 9582 * the epid. 
9583 */ 9584 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9585 ecb->dte_alignment = sizeof (dtrace_epid_t); 9586 9587 epid = state->dts_epid++; 9588 9589 if (epid - 1 >= state->dts_necbs) { 9590 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9591 int necbs = state->dts_necbs << 1; 9592 9593 ASSERT(epid == state->dts_necbs + 1); 9594 9595 if (necbs == 0) { 9596 ASSERT(oecbs == NULL); 9597 necbs = 1; 9598 } 9599 9600 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9601 9602 if (oecbs != NULL) 9603 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9604 9605 dtrace_membar_producer(); 9606 state->dts_ecbs = ecbs; 9607 9608 if (oecbs != NULL) { 9609 /* 9610 * If this state is active, we must dtrace_sync() 9611 * before we can free the old dts_ecbs array: we're 9612 * coming in hot, and there may be active ring 9613 * buffer processing (which indexes into the dts_ecbs 9614 * array) on another CPU. 9615 */ 9616 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9617 dtrace_sync(); 9618 9619 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9620 } 9621 9622 dtrace_membar_producer(); 9623 state->dts_necbs = necbs; 9624 } 9625 9626 ecb->dte_state = state; 9627 9628 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9629 dtrace_membar_producer(); 9630 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9631 9632 return (ecb); 9633 } 9634 9635 static void 9636 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9637 { 9638 dtrace_probe_t *probe = ecb->dte_probe; 9639 9640 ASSERT(MUTEX_HELD(&cpu_lock)); 9641 ASSERT(MUTEX_HELD(&dtrace_lock)); 9642 ASSERT(ecb->dte_next == NULL); 9643 9644 if (probe == NULL) { 9645 /* 9646 * This is the NULL probe -- there's nothing to do. 9647 */ 9648 return; 9649 } 9650 9651 if (probe->dtpr_ecb == NULL) { 9652 dtrace_provider_t *prov = probe->dtpr_provider; 9653 9654 /* 9655 * We're the first ECB on this probe. 9656 */ 9657 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9658 9659 if (ecb->dte_predicate != NULL) 9660 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9661 9662 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9663 probe->dtpr_id, probe->dtpr_arg); 9664 } else { 9665 /* 9666 * This probe is already active. Swing the last pointer to 9667 * point to the new ECB, and issue a dtrace_sync() to assure 9668 * that all CPUs have seen the change. 9669 */ 9670 ASSERT(probe->dtpr_ecb_last != NULL); 9671 probe->dtpr_ecb_last->dte_next = ecb; 9672 probe->dtpr_ecb_last = ecb; 9673 probe->dtpr_predcache = 0; 9674 9675 dtrace_sync(); 9676 } 9677 } 9678 9679 static void 9680 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9681 { 9682 uint32_t maxalign = sizeof (dtrace_epid_t); 9683 uint32_t align = sizeof (uint8_t), offs, diff; 9684 dtrace_action_t *act; 9685 int wastuple = 0; 9686 uint32_t aggbase = UINT32_MAX; 9687 dtrace_state_t *state = ecb->dte_state; 9688 9689 /* 9690 * If we record anything, we always record the epid. (And we always 9691 * record it first.) 9692 */ 9693 offs = sizeof (dtrace_epid_t); 9694 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9695 9696 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9697 dtrace_recdesc_t *rec = &act->dta_rec; 9698 9699 if ((align = rec->dtrd_alignment) > maxalign) 9700 maxalign = align; 9701 9702 if (!wastuple && act->dta_intuple) { 9703 /* 9704 * This is the first record in a tuple. Align the 9705 * offset to be at offset 4 in an 8-byte aligned 9706 * block. 
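 *
 * A worked example (assuming a 4-byte dtrace_aggid_t): if offs is
 * currently 8, diff below becomes 12, and 12 & 7 == 4, so offs is
 * advanced by 8 - 4 to 12; aggbase then becomes 12 - 4 = 8. The
 * aggregation ID thus sits at an 8-byte-aligned base, with the first
 * tuple record at offset 4 within that block, as described above.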
9707 */ 9708 diff = offs + sizeof (dtrace_aggid_t); 9709 9710 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9711 offs += sizeof (uint64_t) - diff; 9712 9713 aggbase = offs - sizeof (dtrace_aggid_t); 9714 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9715 } 9716 9717 /*LINTED*/ 9718 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9719 /* 9720 * The current offset is not properly aligned; align it. 9721 */ 9722 offs += align - diff; 9723 } 9724 9725 rec->dtrd_offset = offs; 9726 9727 if (offs + rec->dtrd_size > ecb->dte_needed) { 9728 ecb->dte_needed = offs + rec->dtrd_size; 9729 9730 if (ecb->dte_needed > state->dts_needed) 9731 state->dts_needed = ecb->dte_needed; 9732 } 9733 9734 if (DTRACEACT_ISAGG(act->dta_kind)) { 9735 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9736 dtrace_action_t *first = agg->dtag_first, *prev; 9737 9738 ASSERT(rec->dtrd_size != 0 && first != NULL); 9739 ASSERT(wastuple); 9740 ASSERT(aggbase != UINT32_MAX); 9741 9742 agg->dtag_base = aggbase; 9743 9744 while ((prev = first->dta_prev) != NULL && 9745 DTRACEACT_ISAGG(prev->dta_kind)) { 9746 agg = (dtrace_aggregation_t *)prev; 9747 first = agg->dtag_first; 9748 } 9749 9750 if (prev != NULL) { 9751 offs = prev->dta_rec.dtrd_offset + 9752 prev->dta_rec.dtrd_size; 9753 } else { 9754 offs = sizeof (dtrace_epid_t); 9755 } 9756 wastuple = 0; 9757 } else { 9758 if (!act->dta_intuple) 9759 ecb->dte_size = offs + rec->dtrd_size; 9760 9761 offs += rec->dtrd_size; 9762 } 9763 9764 wastuple = act->dta_intuple; 9765 } 9766 9767 if ((act = ecb->dte_action) != NULL && 9768 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9769 ecb->dte_size == sizeof (dtrace_epid_t)) { 9770 /* 9771 * If the size is still sizeof (dtrace_epid_t), then all 9772 * actions store no data; set the size to 0. 9773 */ 9774 ecb->dte_alignment = maxalign; 9775 ecb->dte_size = 0; 9776 9777 /* 9778 * If the needed space is still sizeof (dtrace_epid_t), then 9779 * all actions need no additional space; set the needed 9780 * size to 0. 9781 */ 9782 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9783 ecb->dte_needed = 0; 9784 9785 return; 9786 } 9787 9788 /* 9789 * Set our alignment, and make sure that the dte_size and dte_needed 9790 * are aligned to the size of an EPID. 
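 *
 * For instance (assuming a 4-byte dtrace_epid_t): a dte_size of 13
 * rounds below to (13 + 3) & ~3 == 16; dte_needed is rounded the same
 * way, preserving the dte_size <= dte_needed invariant asserted at the
 * end.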
9791 */ 9792 ecb->dte_alignment = maxalign; 9793 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9794 ~(sizeof (dtrace_epid_t) - 1); 9795 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9796 ~(sizeof (dtrace_epid_t) - 1); 9797 ASSERT(ecb->dte_size <= ecb->dte_needed); 9798 } 9799 9800 static dtrace_action_t * 9801 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9802 { 9803 dtrace_aggregation_t *agg; 9804 size_t size = sizeof (uint64_t); 9805 int ntuple = desc->dtad_ntuple; 9806 dtrace_action_t *act; 9807 dtrace_recdesc_t *frec; 9808 dtrace_aggid_t aggid; 9809 dtrace_state_t *state = ecb->dte_state; 9810 9811 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9812 agg->dtag_ecb = ecb; 9813 9814 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9815 9816 switch (desc->dtad_kind) { 9817 case DTRACEAGG_MIN: 9818 agg->dtag_initial = INT64_MAX; 9819 agg->dtag_aggregate = dtrace_aggregate_min; 9820 break; 9821 9822 case DTRACEAGG_MAX: 9823 agg->dtag_initial = INT64_MIN; 9824 agg->dtag_aggregate = dtrace_aggregate_max; 9825 break; 9826 9827 case DTRACEAGG_COUNT: 9828 agg->dtag_aggregate = dtrace_aggregate_count; 9829 break; 9830 9831 case DTRACEAGG_QUANTIZE: 9832 agg->dtag_aggregate = dtrace_aggregate_quantize; 9833 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9834 sizeof (uint64_t); 9835 break; 9836 9837 case DTRACEAGG_LQUANTIZE: { 9838 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9839 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9840 9841 agg->dtag_initial = desc->dtad_arg; 9842 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9843 9844 if (step == 0 || levels == 0) 9845 goto err; 9846 9847 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9848 break; 9849 } 9850 9851 case DTRACEAGG_AVG: 9852 agg->dtag_aggregate = dtrace_aggregate_avg; 9853 size = sizeof (uint64_t) * 2; 9854 break; 9855 9856 case DTRACEAGG_STDDEV: 9857 agg->dtag_aggregate = dtrace_aggregate_stddev; 9858 size = sizeof (uint64_t) * 4; 9859 break; 9860 9861 case DTRACEAGG_SUM: 9862 agg->dtag_aggregate = dtrace_aggregate_sum; 9863 break; 9864 9865 default: 9866 goto err; 9867 } 9868 9869 agg->dtag_action.dta_rec.dtrd_size = size; 9870 9871 if (ntuple == 0) 9872 goto err; 9873 9874 /* 9875 * We must make sure that we have enough actions for the n-tuple. 9876 */ 9877 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9878 if (DTRACEACT_ISAGG(act->dta_kind)) 9879 break; 9880 9881 if (--ntuple == 0) { 9882 /* 9883 * This is the action with which our n-tuple begins. 9884 */ 9885 agg->dtag_first = act; 9886 goto success; 9887 } 9888 } 9889 9890 /* 9891 * This n-tuple is short by ntuple elements. Return failure. 9892 */ 9893 ASSERT(ntuple != 0); 9894 err: 9895 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9896 return (NULL); 9897 9898 success: 9899 /* 9900 * If the last action in the tuple has a size of zero, it's actually 9901 * an expression argument for the aggregating action. 9902 */ 9903 ASSERT(ecb->dte_action_last != NULL); 9904 act = ecb->dte_action_last; 9905 9906 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9907 ASSERT(act->dta_difo != NULL); 9908 9909 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9910 agg->dtag_hasarg = 1; 9911 } 9912 9913 /* 9914 * We need to allocate an id for this aggregation. 
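 *
 * On Solaris the ID is drawn from a vmem arena; on FreeBSD it comes
 * from a unit-number (unr(9)) allocator. Both yield small 1-based
 * integers, which is why dts_aggregations is indexed by aggid - 1
 * below.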
9915 */ 9916 #if defined(sun) 9917 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9918 VM_BESTFIT | VM_SLEEP); 9919 #else 9920 aggid = alloc_unr(state->dts_aggid_arena); 9921 #endif 9922 9923 if (aggid - 1 >= state->dts_naggregations) { 9924 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9925 dtrace_aggregation_t **aggs; 9926 int naggs = state->dts_naggregations << 1; 9927 int onaggs = state->dts_naggregations; 9928 9929 ASSERT(aggid == state->dts_naggregations + 1); 9930 9931 if (naggs == 0) { 9932 ASSERT(oaggs == NULL); 9933 naggs = 1; 9934 } 9935 9936 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9937 9938 if (oaggs != NULL) { 9939 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9940 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9941 } 9942 9943 state->dts_aggregations = aggs; 9944 state->dts_naggregations = naggs; 9945 } 9946 9947 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9948 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9949 9950 frec = &agg->dtag_first->dta_rec; 9951 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9952 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9953 9954 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9955 ASSERT(!act->dta_intuple); 9956 act->dta_intuple = 1; 9957 } 9958 9959 return (&agg->dtag_action); 9960 } 9961 9962 static void 9963 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9964 { 9965 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9966 dtrace_state_t *state = ecb->dte_state; 9967 dtrace_aggid_t aggid = agg->dtag_id; 9968 9969 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9970 #if defined(sun) 9971 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9972 #else 9973 free_unr(state->dts_aggid_arena, aggid); 9974 #endif 9975 9976 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9977 state->dts_aggregations[aggid - 1] = NULL; 9978 9979 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9980 } 9981 9982 static int 9983 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9984 { 9985 dtrace_action_t *action, *last; 9986 dtrace_difo_t *dp = desc->dtad_difo; 9987 uint32_t size = 0, align = sizeof (uint8_t), mask; 9988 uint16_t format = 0; 9989 dtrace_recdesc_t *rec; 9990 dtrace_state_t *state = ecb->dte_state; 9991 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 9992 uint64_t arg = desc->dtad_arg; 9993 9994 ASSERT(MUTEX_HELD(&dtrace_lock)); 9995 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9996 9997 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9998 /* 9999 * If this is an aggregating action, there must be neither 10000 * a speculate nor a commit on the action chain. 10001 */ 10002 dtrace_action_t *act; 10003 10004 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10005 if (act->dta_kind == DTRACEACT_COMMIT) 10006 return (EINVAL); 10007 10008 if (act->dta_kind == DTRACEACT_SPECULATE) 10009 return (EINVAL); 10010 } 10011 10012 action = dtrace_ecb_aggregation_create(ecb, desc); 10013 10014 if (action == NULL) 10015 return (EINVAL); 10016 } else { 10017 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10018 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10019 dp != NULL && dp->dtdo_destructive)) { 10020 state->dts_destructive = 1; 10021 } 10022 10023 switch (desc->dtad_kind) { 10024 case DTRACEACT_PRINTF: 10025 case DTRACEACT_PRINTA: 10026 case DTRACEACT_SYSTEM: 10027 case DTRACEACT_FREOPEN: 10028 /* 10029 * We know that our arg is a string -- turn it into a 10030 * format. 
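 *
 * For example (illustrative): for printf("pid %d", pid), the format
 * string is copied into the state's format table by
 * dtrace_format_add(), which returns a 1-based index; that index is
 * recorded in dtrd_format below so that the consumer can look the
 * format back up when it processes the buffer.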
10031 */ 10032 if (arg == 0) { 10033 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10034 format = 0; 10035 } else { 10036 ASSERT(arg != 0); 10037 #if defined(sun) 10038 ASSERT(arg > KERNELBASE); 10039 #endif 10040 format = dtrace_format_add(state, 10041 (char *)(uintptr_t)arg); 10042 } 10043 10044 /*FALLTHROUGH*/ 10045 case DTRACEACT_LIBACT: 10046 case DTRACEACT_DIFEXPR: 10047 if (dp == NULL) 10048 return (EINVAL); 10049 10050 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10051 break; 10052 10053 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10054 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10055 return (EINVAL); 10056 10057 size = opt[DTRACEOPT_STRSIZE]; 10058 } 10059 10060 break; 10061 10062 case DTRACEACT_STACK: 10063 if ((nframes = arg) == 0) { 10064 nframes = opt[DTRACEOPT_STACKFRAMES]; 10065 ASSERT(nframes > 0); 10066 arg = nframes; 10067 } 10068 10069 size = nframes * sizeof (pc_t); 10070 break; 10071 10072 case DTRACEACT_JSTACK: 10073 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10074 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10075 10076 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10077 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10078 10079 arg = DTRACE_USTACK_ARG(nframes, strsize); 10080 10081 /*FALLTHROUGH*/ 10082 case DTRACEACT_USTACK: 10083 if (desc->dtad_kind != DTRACEACT_JSTACK && 10084 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10085 strsize = DTRACE_USTACK_STRSIZE(arg); 10086 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10087 ASSERT(nframes > 0); 10088 arg = DTRACE_USTACK_ARG(nframes, strsize); 10089 } 10090 10091 /* 10092 * Save a slot for the pid. 10093 */ 10094 size = (nframes + 1) * sizeof (uint64_t); 10095 size += DTRACE_USTACK_STRSIZE(arg); 10096 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10097 10098 break; 10099 10100 case DTRACEACT_SYM: 10101 case DTRACEACT_MOD: 10102 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10103 sizeof (uint64_t)) || 10104 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10105 return (EINVAL); 10106 break; 10107 10108 case DTRACEACT_USYM: 10109 case DTRACEACT_UMOD: 10110 case DTRACEACT_UADDR: 10111 if (dp == NULL || 10112 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10113 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10114 return (EINVAL); 10115 10116 /* 10117 * We have a slot for the pid, plus a slot for the 10118 * argument. To keep things simple (aligned with 10119 * bitness-neutral sizing), we store each as a 64-bit 10120 * quantity. 
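 *
 * (An illustrative reading: for usym(arg0), the consumer needs both
 * the pid and the 64-bit address in order to resolve the symbol in the
 * traced process's address space -- hence the fixed two-uint64_t
 * record set up below.)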
10121 		 */
10122 			size = 2 * sizeof (uint64_t);
10123 			break;
10124
10125 		case DTRACEACT_STOP:
10126 		case DTRACEACT_BREAKPOINT:
10127 		case DTRACEACT_PANIC:
10128 			break;
10129
10130 		case DTRACEACT_CHILL:
10131 		case DTRACEACT_DISCARD:
10132 		case DTRACEACT_RAISE:
10133 			if (dp == NULL)
10134 				return (EINVAL);
10135 			break;
10136
10137 		case DTRACEACT_EXIT:
10138 			if (dp == NULL ||
10139 			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10140 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10141 				return (EINVAL);
10142 			break;
10143
10144 		case DTRACEACT_SPECULATE:
10145 			if (ecb->dte_size > sizeof (dtrace_epid_t))
10146 				return (EINVAL);
10147
10148 			if (dp == NULL)
10149 				return (EINVAL);
10150
10151 			state->dts_speculates = 1;
10152 			break;
10153
10154 		case DTRACEACT_PRINTM:
			if (dp == NULL)	/* printm() requires a DIF argument */
				return (EINVAL);
10155 			size = dp->dtdo_rtype.dtdt_size;
10156 			break;
10157
10158 		case DTRACEACT_PRINTT:
			if (dp == NULL)	/* printt() requires a DIF argument */
				return (EINVAL);
10159 			size = dp->dtdo_rtype.dtdt_size;
10160 			break;
10161
10162 		case DTRACEACT_COMMIT: {
10163 			dtrace_action_t *act = ecb->dte_action;
10164
10165 			for (; act != NULL; act = act->dta_next) {
10166 				if (act->dta_kind == DTRACEACT_COMMIT)
10167 					return (EINVAL);
10168 			}
10169
10170 			if (dp == NULL)
10171 				return (EINVAL);
10172 			break;
10173 		}
10174
10175 		default:
10176 			return (EINVAL);
10177 		}
10178
10179 		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10180 			/*
10181 			 * If this is a data-storing action or a speculate,
10182 			 * we must be sure that there isn't a commit on the
10183 			 * action chain.
10184 			 */
10185 			dtrace_action_t *act = ecb->dte_action;
10186
10187 			for (; act != NULL; act = act->dta_next) {
10188 				if (act->dta_kind == DTRACEACT_COMMIT)
10189 					return (EINVAL);
10190 			}
10191 		}
10192
10193 		action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10194 		action->dta_rec.dtrd_size = size;
10195 	}
10196
10197 	action->dta_refcnt = 1;
10198 	rec = &action->dta_rec;
10199 	size = rec->dtrd_size;
10200
10201 	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10202 		if (!(size & mask)) {
10203 			align = mask + 1;
10204 			break;
10205 		}
10206 	}
10207
10208 	action->dta_kind = desc->dtad_kind;
10209
10210 	if ((action->dta_difo = dp) != NULL)
10211 		dtrace_difo_hold(dp);
10212
10213 	rec->dtrd_action = action->dta_kind;
10214 	rec->dtrd_arg = arg;
10215 	rec->dtrd_uarg = desc->dtad_uarg;
10216 	rec->dtrd_alignment = (uint16_t)align;
10217 	rec->dtrd_format = format;
10218
10219 	if ((last = ecb->dte_action_last) != NULL) {
10220 		ASSERT(ecb->dte_action != NULL);
10221 		action->dta_prev = last;
10222 		last->dta_next = action;
10223 	} else {
10224 		ASSERT(ecb->dte_action == NULL);
10225 		ecb->dte_action = action;
10226 	}
10227
10228 	ecb->dte_action_last = action;
10229
10230 	return (0);
10231 }
10232
10233 static void
10234 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10235 {
10236 	dtrace_action_t *act = ecb->dte_action, *next;
10237 	dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10238 	dtrace_difo_t *dp;
10239 	uint16_t format;
10240
10241 	if (act != NULL && act->dta_refcnt > 1) {
10242 		ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10243 		act->dta_refcnt--;
10244 	} else {
10245 		for (; act != NULL; act = next) {
10246 			next = act->dta_next;
10247 			ASSERT(next != NULL || act == ecb->dte_action_last);
10248 			ASSERT(act->dta_refcnt == 1);
10249
10250 			if ((format = act->dta_rec.dtrd_format) != 0)
10251 				dtrace_format_remove(ecb->dte_state, format);
10252
10253 			if ((dp = act->dta_difo) != NULL)
10254 				dtrace_difo_release(dp, vstate);
10255
10256 			if (DTRACEACT_ISAGG(act->dta_kind)) {
10257 				dtrace_ecb_aggregation_destroy(ecb, act);
10258 			} else
{ 10259 kmem_free(act, sizeof (dtrace_action_t)); 10260 } 10261 } 10262 } 10263 10264 ecb->dte_action = NULL; 10265 ecb->dte_action_last = NULL; 10266 ecb->dte_size = sizeof (dtrace_epid_t); 10267 } 10268 10269 static void 10270 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10271 { 10272 /* 10273 * We disable the ECB by removing it from its probe. 10274 */ 10275 dtrace_ecb_t *pecb, *prev = NULL; 10276 dtrace_probe_t *probe = ecb->dte_probe; 10277 10278 ASSERT(MUTEX_HELD(&dtrace_lock)); 10279 10280 if (probe == NULL) { 10281 /* 10282 * This is the NULL probe; there is nothing to disable. 10283 */ 10284 return; 10285 } 10286 10287 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10288 if (pecb == ecb) 10289 break; 10290 prev = pecb; 10291 } 10292 10293 ASSERT(pecb != NULL); 10294 10295 if (prev == NULL) { 10296 probe->dtpr_ecb = ecb->dte_next; 10297 } else { 10298 prev->dte_next = ecb->dte_next; 10299 } 10300 10301 if (ecb == probe->dtpr_ecb_last) { 10302 ASSERT(ecb->dte_next == NULL); 10303 probe->dtpr_ecb_last = prev; 10304 } 10305 10306 /* 10307 * The ECB has been disconnected from the probe; now sync to assure 10308 * that all CPUs have seen the change before returning. 10309 */ 10310 dtrace_sync(); 10311 10312 if (probe->dtpr_ecb == NULL) { 10313 /* 10314 * That was the last ECB on the probe; clear the predicate 10315 * cache ID for the probe, disable it and sync one more time 10316 * to assure that we'll never hit it again. 10317 */ 10318 dtrace_provider_t *prov = probe->dtpr_provider; 10319 10320 ASSERT(ecb->dte_next == NULL); 10321 ASSERT(probe->dtpr_ecb_last == NULL); 10322 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10323 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10324 probe->dtpr_id, probe->dtpr_arg); 10325 dtrace_sync(); 10326 } else { 10327 /* 10328 * There is at least one ECB remaining on the probe. If there 10329 * is _exactly_ one, set the probe's predicate cache ID to be 10330 * the predicate cache ID of the remaining ECB. 
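 * (This cached ID is what allows dtrace_probe() to skip evaluating the
 * predicate outright when the firing thread's predicate cache already
 * matches it.)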
10331 */ 10332 ASSERT(probe->dtpr_ecb_last != NULL); 10333 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10334 10335 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10336 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10337 10338 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10339 10340 if (p != NULL) 10341 probe->dtpr_predcache = p->dtp_cacheid; 10342 } 10343 10344 ecb->dte_next = NULL; 10345 } 10346 } 10347 10348 static void 10349 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10350 { 10351 dtrace_state_t *state = ecb->dte_state; 10352 dtrace_vstate_t *vstate = &state->dts_vstate; 10353 dtrace_predicate_t *pred; 10354 dtrace_epid_t epid = ecb->dte_epid; 10355 10356 ASSERT(MUTEX_HELD(&dtrace_lock)); 10357 ASSERT(ecb->dte_next == NULL); 10358 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10359 10360 if ((pred = ecb->dte_predicate) != NULL) 10361 dtrace_predicate_release(pred, vstate); 10362 10363 dtrace_ecb_action_remove(ecb); 10364 10365 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10366 state->dts_ecbs[epid - 1] = NULL; 10367 10368 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10369 } 10370 10371 static dtrace_ecb_t * 10372 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10373 dtrace_enabling_t *enab) 10374 { 10375 dtrace_ecb_t *ecb; 10376 dtrace_predicate_t *pred; 10377 dtrace_actdesc_t *act; 10378 dtrace_provider_t *prov; 10379 dtrace_ecbdesc_t *desc = enab->dten_current; 10380 10381 ASSERT(MUTEX_HELD(&dtrace_lock)); 10382 ASSERT(state != NULL); 10383 10384 ecb = dtrace_ecb_add(state, probe); 10385 ecb->dte_uarg = desc->dted_uarg; 10386 10387 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10388 dtrace_predicate_hold(pred); 10389 ecb->dte_predicate = pred; 10390 } 10391 10392 if (probe != NULL) { 10393 /* 10394 * If the provider shows more leg than the consumer is old 10395 * enough to see, we need to enable the appropriate implicit 10396 * predicate bits to prevent the ecb from activating at 10397 * revealing times. 10398 * 10399 * Providers specifying DTRACE_PRIV_USER at register time 10400 * are stating that they need the /proc-style privilege 10401 * model to be enforced, and this is what DTRACE_COND_OWNER 10402 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10403 */ 10404 prov = probe->dtpr_provider; 10405 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10406 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10407 ecb->dte_cond |= DTRACE_COND_OWNER; 10408 10409 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10410 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10411 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10412 10413 /* 10414 * If the provider shows us kernel innards and the user 10415 * is lacking sufficient privilege, enable the 10416 * DTRACE_COND_USERMODE implicit predicate. 10417 */ 10418 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10419 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10420 ecb->dte_cond |= DTRACE_COND_USERMODE; 10421 } 10422 10423 if (dtrace_ecb_create_cache != NULL) { 10424 /* 10425 * If we have a cached ecb, we'll use its action list instead 10426 * of creating our own (saving both time and space). 
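 * The list is shared by bumping dta_refcnt, below; it is freed only when
 * the last ECB referencing it is destroyed (see dtrace_ecb_action_remove(),
 * above).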
10427 */ 10428 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10429 dtrace_action_t *act = cached->dte_action; 10430 10431 if (act != NULL) { 10432 ASSERT(act->dta_refcnt > 0); 10433 act->dta_refcnt++; 10434 ecb->dte_action = act; 10435 ecb->dte_action_last = cached->dte_action_last; 10436 ecb->dte_needed = cached->dte_needed; 10437 ecb->dte_size = cached->dte_size; 10438 ecb->dte_alignment = cached->dte_alignment; 10439 } 10440 10441 return (ecb); 10442 } 10443 10444 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10445 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10446 dtrace_ecb_destroy(ecb); 10447 return (NULL); 10448 } 10449 } 10450 10451 dtrace_ecb_resize(ecb); 10452 10453 return (dtrace_ecb_create_cache = ecb); 10454 } 10455 10456 static int 10457 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10458 { 10459 dtrace_ecb_t *ecb; 10460 dtrace_enabling_t *enab = arg; 10461 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10462 10463 ASSERT(state != NULL); 10464 10465 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10466 /* 10467 * This probe was created in a generation for which this 10468 * enabling has previously created ECBs; we don't want to 10469 * enable it again, so just kick out. 10470 */ 10471 return (DTRACE_MATCH_NEXT); 10472 } 10473 10474 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10475 return (DTRACE_MATCH_DONE); 10476 10477 dtrace_ecb_enable(ecb); 10478 return (DTRACE_MATCH_NEXT); 10479 } 10480 10481 static dtrace_ecb_t * 10482 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10483 { 10484 dtrace_ecb_t *ecb; 10485 10486 ASSERT(MUTEX_HELD(&dtrace_lock)); 10487 10488 if (id == 0 || id > state->dts_necbs) 10489 return (NULL); 10490 10491 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10492 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10493 10494 return (state->dts_ecbs[id - 1]); 10495 } 10496 10497 static dtrace_aggregation_t * 10498 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10499 { 10500 dtrace_aggregation_t *agg; 10501 10502 ASSERT(MUTEX_HELD(&dtrace_lock)); 10503 10504 if (id == 0 || id > state->dts_naggregations) 10505 return (NULL); 10506 10507 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10508 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10509 agg->dtag_id == id); 10510 10511 return (state->dts_aggregations[id - 1]); 10512 } 10513 10514 /* 10515 * DTrace Buffer Functions 10516 * 10517 * The following functions manipulate DTrace buffers. Most of these functions 10518 * are called in the context of establishing or processing consumer state; 10519 * exceptions are explicitly noted. 10520 */ 10521 10522 /* 10523 * Note: called from cross call context. This function switches the two 10524 * buffers on a given CPU. The atomicity of this operation is assured by 10525 * disabling interrupts while the actual switch takes place; the disabling of 10526 * interrupts serializes the execution with any execution of dtrace_probe() on 10527 * the same CPU. 
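 * (dtrace_probe() itself runs with interrupts disabled, and the cross call
 * assures that the switch is performed on the CPU that owns the buffer.)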
10528 */ 10529 static void 10530 dtrace_buffer_switch(dtrace_buffer_t *buf) 10531 { 10532 caddr_t tomax = buf->dtb_tomax; 10533 caddr_t xamot = buf->dtb_xamot; 10534 dtrace_icookie_t cookie; 10535 10536 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10537 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10538 10539 cookie = dtrace_interrupt_disable(); 10540 buf->dtb_tomax = xamot; 10541 buf->dtb_xamot = tomax; 10542 buf->dtb_xamot_drops = buf->dtb_drops; 10543 buf->dtb_xamot_offset = buf->dtb_offset; 10544 buf->dtb_xamot_errors = buf->dtb_errors; 10545 buf->dtb_xamot_flags = buf->dtb_flags; 10546 buf->dtb_offset = 0; 10547 buf->dtb_drops = 0; 10548 buf->dtb_errors = 0; 10549 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10550 dtrace_interrupt_enable(cookie); 10551 } 10552 10553 /* 10554 * Note: called from cross call context. This function activates a buffer 10555 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10556 * is guaranteed by the disabling of interrupts. 10557 */ 10558 static void 10559 dtrace_buffer_activate(dtrace_state_t *state) 10560 { 10561 dtrace_buffer_t *buf; 10562 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10563 10564 buf = &state->dts_buffer[curcpu]; 10565 10566 if (buf->dtb_tomax != NULL) { 10567 /* 10568 * We might like to assert that the buffer is marked inactive, 10569 * but this isn't necessarily true: the CPU 10570 * that processes the BEGIN probe has its buffer activated 10571 * manually. In this case, we take the (harmless) action of 10572 * re-clearing the INACTIVE bit. 10573 */ 10574 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10575 } 10576 10577 dtrace_interrupt_enable(cookie); 10578 } 10579 10580 static int 10581 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10582 processorid_t cpu) 10583 { 10584 #if defined(sun) 10585 cpu_t *cp; 10586 #else 10587 struct pcpu *cp; 10588 #endif 10589 dtrace_buffer_t *buf; 10590 10591 #if defined(sun) 10592 ASSERT(MUTEX_HELD(&cpu_lock)); 10593 ASSERT(MUTEX_HELD(&dtrace_lock)); 10594 10595 if (size > dtrace_nonroot_maxsize && 10596 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10597 return (EFBIG); 10598 10599 cp = cpu_list; 10600 10601 do { 10602 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10603 continue; 10604 10605 buf = &bufs[cp->cpu_id]; 10606 10607 /* 10608 * If there is already a buffer allocated for this CPU, it 10609 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10610 */ 10611 if (buf->dtb_tomax != NULL) { 10612 ASSERT(buf->dtb_size == size); 10613 continue; 10614 } 10615 10616 ASSERT(buf->dtb_xamot == NULL); 10617 10618 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10619 goto err; 10620 10621 buf->dtb_size = size; 10622 buf->dtb_flags = flags; 10623 buf->dtb_offset = 0; 10624 buf->dtb_drops = 0; 10625 10626 if (flags & DTRACEBUF_NOSWITCH) 10627 continue; 10628 10629 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10630 goto err; 10631 } while ((cp = cp->cpu_next) != cpu_list); 10632 10633 return (0); 10634 10635 err: 10636 cp = cpu_list; 10637 10638 do { 10639 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10640 continue; 10641 10642 buf = &bufs[cp->cpu_id]; 10643 10644 if (buf->dtb_xamot != NULL) { 10645 ASSERT(buf->dtb_tomax != NULL); 10646 ASSERT(buf->dtb_size == size); 10647 kmem_free(buf->dtb_xamot, size); 10648 } 10649 10650 if (buf->dtb_tomax != NULL) { 10651 ASSERT(buf->dtb_size == size); 10652 kmem_free(buf->dtb_tomax, size); 10653 } 10654 10655 buf->dtb_tomax = NULL; 10656 buf->dtb_xamot = NULL; 10657 buf->dtb_size = 0; 10658 } while ((cp = cp->cpu_next) != cpu_list); 10659 10660 return (ENOMEM); 10661 #else 10662 int i; 10663 10664 #if defined(__amd64__) 10665 /* 10666 * FreeBSD isn't good at limiting the amount of memory we 10667 * ask to malloc, so let's place a limit here before trying 10668 * to do something that might well end in tears at bedtime. 10669 */ 10670 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10671 return (ENOMEM); 10672 #endif 10673 10674 ASSERT(MUTEX_HELD(&dtrace_lock)); 10675 for (i = 0; i <= mp_maxid; i++) { 10676 if ((cp = pcpu_find(i)) == NULL) 10677 continue; 10678 10679 if (cpu != DTRACE_CPUALL && cpu != i) 10680 continue; 10681 10682 buf = &bufs[i]; 10683 10684 /* 10685 * If there is already a buffer allocated for this CPU, it 10686 * is only possible that this is a DR event. In this case, 10687 * the buffer size must match our specified size. 10688 */ 10689 if (buf->dtb_tomax != NULL) { 10690 ASSERT(buf->dtb_size == size); 10691 continue; 10692 } 10693 10694 ASSERT(buf->dtb_xamot == NULL); 10695 10696 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10697 goto err; 10698 10699 buf->dtb_size = size; 10700 buf->dtb_flags = flags; 10701 buf->dtb_offset = 0; 10702 buf->dtb_drops = 0; 10703 10704 if (flags & DTRACEBUF_NOSWITCH) 10705 continue; 10706 10707 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10708 goto err; 10709 } 10710 10711 return (0); 10712 10713 err: 10714 /* 10715 * Error allocating memory, so free the buffers that were 10716 * allocated before the failed allocation. 10717 */ 10718 for (i = 0; i <= mp_maxid; i++) { 10719 if ((cp = pcpu_find(i)) == NULL) 10720 continue; 10721 10722 if (cpu != DTRACE_CPUALL && cpu != i) 10723 continue; 10724 10725 buf = &bufs[i]; 10726 10727 if (buf->dtb_xamot != NULL) { 10728 ASSERT(buf->dtb_tomax != NULL); 10729 ASSERT(buf->dtb_size == size); 10730 kmem_free(buf->dtb_xamot, size); 10731 } 10732 10733 if (buf->dtb_tomax != NULL) { 10734 ASSERT(buf->dtb_size == size); 10735 kmem_free(buf->dtb_tomax, size); 10736 } 10737 10738 buf->dtb_tomax = NULL; 10739 buf->dtb_xamot = NULL; 10740 buf->dtb_size = 0; 10741 10742 } 10743 10744 return (ENOMEM); 10745 #endif 10746 } 10747 10748 /* 10749 * Note: called from probe context. This function just increments the drop 10750 * count on a buffer.
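 * (The count is latched into dtb_xamot_drops -- and reset -- when the
 * buffers are switched.)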
It has been made a function to allow for the 10751 * possibility of understanding the source of mysterious drop counts. (A 10752 * problem for which one may be particularly disappointed that DTrace cannot 10753 * be used to understand DTrace.) 10754 */ 10755 static void 10756 dtrace_buffer_drop(dtrace_buffer_t *buf) 10757 { 10758 buf->dtb_drops++; 10759 } 10760 10761 /* 10762 * Note: called from probe context. This function is called to reserve space 10763 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10764 * mstate. Returns the new offset in the buffer, or a negative value if an 10765 * error has occurred. 10766 */ 10767 static intptr_t 10768 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10769 dtrace_state_t *state, dtrace_mstate_t *mstate) 10770 { 10771 intptr_t offs = buf->dtb_offset, soffs; 10772 intptr_t woffs; 10773 caddr_t tomax; 10774 size_t total; 10775 10776 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10777 return (-1); 10778 10779 if ((tomax = buf->dtb_tomax) == NULL) { 10780 dtrace_buffer_drop(buf); 10781 return (-1); 10782 } 10783 10784 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10785 while (offs & (align - 1)) { 10786 /* 10787 * Assert that our alignment is off by a number which 10788 * is itself sizeof (uint32_t) aligned. 10789 */ 10790 ASSERT(!((align - (offs & (align - 1))) & 10791 (sizeof (uint32_t) - 1))); 10792 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10793 offs += sizeof (uint32_t); 10794 } 10795 10796 if ((soffs = offs + needed) > buf->dtb_size) { 10797 dtrace_buffer_drop(buf); 10798 return (-1); 10799 } 10800 10801 if (mstate == NULL) 10802 return (offs); 10803 10804 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10805 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10806 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10807 10808 return (offs); 10809 } 10810 10811 if (buf->dtb_flags & DTRACEBUF_FILL) { 10812 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10813 (buf->dtb_flags & DTRACEBUF_FULL)) 10814 return (-1); 10815 goto out; 10816 } 10817 10818 total = needed + (offs & (align - 1)); 10819 10820 /* 10821 * For a ring buffer, life is quite a bit more complicated. Before 10822 * we can store any padding, we need to adjust our wrapping offset. 10823 * (If we've never before wrapped or we're not about to, no adjustment 10824 * is required.) 10825 */ 10826 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10827 offs + total > buf->dtb_size) { 10828 woffs = buf->dtb_xamot_offset; 10829 10830 if (offs + total > buf->dtb_size) { 10831 /* 10832 * We can't fit in the end of the buffer. First, a 10833 * sanity check that we can fit in the buffer at all. 10834 */ 10835 if (total > buf->dtb_size) { 10836 dtrace_buffer_drop(buf); 10837 return (-1); 10838 } 10839 10840 /* 10841 * We're going to be storing at the top of the buffer, 10842 * so now we need to deal with the wrapped offset. We 10843 * only reset our wrapped offset to 0 if it is 10844 * currently greater than the current offset. If it 10845 * is less than the current offset, it is because a 10846 * previous allocation induced a wrap -- but the 10847 * allocation didn't subsequently take the space due 10848 * to an error or false predicate evaluation. In this 10849 * case, we'll just leave the wrapped offset alone: if 10850 * the wrapped offset hasn't been advanced far enough 10851 * for this allocation, it will be adjusted in the 10852 * lower loop. 
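 * To make this concrete with an illustrative (hypothetical) set of values:
 * with a dtb_size of 64, an offs of 48 and a total of 24, offs + total
 * exceeds the buffer, so the bytes from offs to the end are zeroed, offs
 * is reset to 0, total shrinks back to needed, and DTRACEBUF_WRAPPED is
 * set.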
10853 */ 10854 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10855 if (woffs >= offs) 10856 woffs = 0; 10857 } else { 10858 woffs = 0; 10859 } 10860 10861 /* 10862 * Now we know that we're going to be storing to the 10863 * top of the buffer and that there is room for us 10864 * there. We need to clear the buffer from the current 10865 * offset to the end (there may be old gunk there). 10866 */ 10867 while (offs < buf->dtb_size) 10868 tomax[offs++] = 0; 10869 10870 /* 10871 * We need to set our offset to zero. And because we 10872 * are wrapping, we need to set the bit indicating as 10873 * much. We can also adjust our needed space back 10874 * down to the space required by the ECB -- we know 10875 * that the top of the buffer is aligned. 10876 */ 10877 offs = 0; 10878 total = needed; 10879 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10880 } else { 10881 /* 10882 * There is room for us in the buffer, so we simply 10883 * need to check the wrapped offset. 10884 */ 10885 if (woffs < offs) { 10886 /* 10887 * The wrapped offset is less than the offset. 10888 * This can happen if we allocated buffer space 10889 * that induced a wrap, but then we didn't 10890 * subsequently take the space due to an error 10891 * or false predicate evaluation. This is 10892 * okay; we know that _this_ allocation isn't 10893 * going to induce a wrap. We still can't 10894 * reset the wrapped offset to be zero, 10895 * however: the space may have been trashed in 10896 * the previous failed probe attempt. But at 10897 * least the wrapped offset doesn't need to 10898 * be adjusted at all... 10899 */ 10900 goto out; 10901 } 10902 } 10903 10904 while (offs + total > woffs) { 10905 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10906 size_t size; 10907 10908 if (epid == DTRACE_EPIDNONE) { 10909 size = sizeof (uint32_t); 10910 } else { 10911 ASSERT(epid <= state->dts_necbs); 10912 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10913 10914 size = state->dts_ecbs[epid - 1]->dte_size; 10915 } 10916 10917 ASSERT(woffs + size <= buf->dtb_size); 10918 ASSERT(size != 0); 10919 10920 if (woffs + size == buf->dtb_size) { 10921 /* 10922 * We've reached the end of the buffer; we want 10923 * to set the wrapped offset to 0 and break 10924 * out. However, if the offs is 0, then we're 10925 * in a strange edge-condition: the amount of 10926 * space that we want to reserve plus the size 10927 * of the record that we're overwriting is 10928 * greater than the size of the buffer. This 10929 * is problematic because if we reserve the 10930 * space but subsequently don't consume it (due 10931 * to a failed predicate or error) the wrapped 10932 * offset will be 0 -- yet the EPID at offset 0 10933 * will not be committed. This situation is 10934 * relatively easy to deal with: if we're in 10935 * this case, the buffer is indistinguishable 10936 * from one that hasn't wrapped; we need only 10937 * finish the job by clearing the wrapped bit, 10938 * explicitly setting the offset to be 0, and 10939 * zero'ing out the old data in the buffer. 10940 */ 10941 if (offs == 0) { 10942 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10943 buf->dtb_offset = 0; 10944 woffs = total; 10945 10946 while (woffs < buf->dtb_size) 10947 tomax[woffs++] = 0; 10948 } 10949 10950 woffs = 0; 10951 break; 10952 } 10953 10954 woffs += size; 10955 } 10956 10957 /* 10958 * We have a wrapped offset. It may be that the wrapped offset 10959 * has become zero -- that's okay. 
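 * (Because ring buffers are never switched -- dtrace_buffer_switch()
 * asserts as much -- dtb_xamot_offset is free to serve as the wrapped
 * offset.)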
10960 */ 10961 buf->dtb_xamot_offset = woffs; 10962 } 10963 10964 out: 10965 /* 10966 * Now we can plow the buffer with any necessary padding. 10967 */ 10968 while (offs & (align - 1)) { 10969 /* 10970 * Assert that our alignment is off by a number which 10971 * is itself sizeof (uint32_t) aligned. 10972 */ 10973 ASSERT(!((align - (offs & (align - 1))) & 10974 (sizeof (uint32_t) - 1))); 10975 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10976 offs += sizeof (uint32_t); 10977 } 10978 10979 if (buf->dtb_flags & DTRACEBUF_FILL) { 10980 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10981 buf->dtb_flags |= DTRACEBUF_FULL; 10982 return (-1); 10983 } 10984 } 10985 10986 if (mstate == NULL) 10987 return (offs); 10988 10989 /* 10990 * For ring buffers and fill buffers, the scratch space is always 10991 * the inactive buffer. 10992 */ 10993 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10994 mstate->dtms_scratch_size = buf->dtb_size; 10995 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10996 10997 return (offs); 10998 } 10999 11000 static void 11001 dtrace_buffer_polish(dtrace_buffer_t *buf) 11002 { 11003 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11004 ASSERT(MUTEX_HELD(&dtrace_lock)); 11005 11006 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11007 return; 11008 11009 /* 11010 * We need to polish the ring buffer. There are three cases: 11011 * 11012 * - The first (and presumably most common) is that there is no gap 11013 * between the buffer offset and the wrapped offset. In this case, 11014 * there is nothing in the buffer that isn't valid data; we can 11015 * mark the buffer as polished and return. 11016 * 11017 * - The second (less common than the first but still more common 11018 * than the third) is that there is a gap between the buffer offset 11019 * and the wrapped offset, and the wrapped offset is larger than the 11020 * buffer offset. This can happen because of an alignment issue, or 11021 * can happen because of a call to dtrace_buffer_reserve() that 11022 * didn't subsequently consume the buffer space. In this case, 11023 * we need to zero the data from the buffer offset to the wrapped 11024 * offset. 11025 * 11026 * - The third (and least common) is that there is a gap between the 11027 * buffer offset and the wrapped offset, but the wrapped offset is 11028 * _less_ than the buffer offset. This can only happen because a 11029 * call to dtrace_buffer_reserve() induced a wrap, but the space 11030 * was not subsequently consumed. In this case, we need to zero the 11031 * space from the offset to the end of the buffer _and_ from the 11032 * top of the buffer to the wrapped offset. 
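 * The two bzero() calls below handle the second and third cases,
 * respectively; in the first case neither condition holds, and there is
 * nothing to do.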
11033 */ 11034 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11035 bzero(buf->dtb_tomax + buf->dtb_offset, 11036 buf->dtb_xamot_offset - buf->dtb_offset); 11037 } 11038 11039 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11040 bzero(buf->dtb_tomax + buf->dtb_offset, 11041 buf->dtb_size - buf->dtb_offset); 11042 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11043 } 11044 } 11045 11046 static void 11047 dtrace_buffer_free(dtrace_buffer_t *bufs) 11048 { 11049 int i; 11050 11051 for (i = 0; i < NCPU; i++) { 11052 dtrace_buffer_t *buf = &bufs[i]; 11053 11054 if (buf->dtb_tomax == NULL) { 11055 ASSERT(buf->dtb_xamot == NULL); 11056 ASSERT(buf->dtb_size == 0); 11057 continue; 11058 } 11059 11060 if (buf->dtb_xamot != NULL) { 11061 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11062 kmem_free(buf->dtb_xamot, buf->dtb_size); 11063 } 11064 11065 kmem_free(buf->dtb_tomax, buf->dtb_size); 11066 buf->dtb_size = 0; 11067 buf->dtb_tomax = NULL; 11068 buf->dtb_xamot = NULL; 11069 } 11070 } 11071 11072 /* 11073 * DTrace Enabling Functions 11074 */ 11075 static dtrace_enabling_t * 11076 dtrace_enabling_create(dtrace_vstate_t *vstate) 11077 { 11078 dtrace_enabling_t *enab; 11079 11080 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11081 enab->dten_vstate = vstate; 11082 11083 return (enab); 11084 } 11085 11086 static void 11087 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11088 { 11089 dtrace_ecbdesc_t **ndesc; 11090 size_t osize, nsize; 11091 11092 /* 11093 * We can't add to enablings after we've enabled them, or after we've 11094 * retained them. 11095 */ 11096 ASSERT(enab->dten_probegen == 0); 11097 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11098 11099 if (enab->dten_ndesc < enab->dten_maxdesc) { 11100 enab->dten_desc[enab->dten_ndesc++] = ecb; 11101 return; 11102 } 11103 11104 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11105 11106 if (enab->dten_maxdesc == 0) { 11107 enab->dten_maxdesc = 1; 11108 } else { 11109 enab->dten_maxdesc <<= 1; 11110 } 11111 11112 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11113 11114 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11115 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11116 bcopy(enab->dten_desc, ndesc, osize); 11117 if (enab->dten_desc != NULL) 11118 kmem_free(enab->dten_desc, osize); 11119 11120 enab->dten_desc = ndesc; 11121 enab->dten_desc[enab->dten_ndesc++] = ecb; 11122 } 11123 11124 static void 11125 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11126 dtrace_probedesc_t *pd) 11127 { 11128 dtrace_ecbdesc_t *new; 11129 dtrace_predicate_t *pred; 11130 dtrace_actdesc_t *act; 11131 11132 /* 11133 * We're going to create a new ECB description that matches the 11134 * specified ECB in every way, but has the specified probe description. 
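 * Note that the predicate and actions are not copied: we merely take
 * additional holds on them, so they remain live until every description
 * referencing them has been destroyed.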
11135 */ 11136 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11137 11138 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11139 dtrace_predicate_hold(pred); 11140 11141 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11142 dtrace_actdesc_hold(act); 11143 11144 new->dted_action = ecb->dted_action; 11145 new->dted_pred = ecb->dted_pred; 11146 new->dted_probe = *pd; 11147 new->dted_uarg = ecb->dted_uarg; 11148 11149 dtrace_enabling_add(enab, new); 11150 } 11151 11152 static void 11153 dtrace_enabling_dump(dtrace_enabling_t *enab) 11154 { 11155 int i; 11156 11157 for (i = 0; i < enab->dten_ndesc; i++) { 11158 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11159 11160 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11161 desc->dtpd_provider, desc->dtpd_mod, 11162 desc->dtpd_func, desc->dtpd_name); 11163 } 11164 } 11165 11166 static void 11167 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11168 { 11169 int i; 11170 dtrace_ecbdesc_t *ep; 11171 dtrace_vstate_t *vstate = enab->dten_vstate; 11172 11173 ASSERT(MUTEX_HELD(&dtrace_lock)); 11174 11175 for (i = 0; i < enab->dten_ndesc; i++) { 11176 dtrace_actdesc_t *act, *next; 11177 dtrace_predicate_t *pred; 11178 11179 ep = enab->dten_desc[i]; 11180 11181 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11182 dtrace_predicate_release(pred, vstate); 11183 11184 for (act = ep->dted_action; act != NULL; act = next) { 11185 next = act->dtad_next; 11186 dtrace_actdesc_release(act, vstate); 11187 } 11188 11189 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11190 } 11191 11192 if (enab->dten_desc != NULL) 11193 kmem_free(enab->dten_desc, 11194 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11195 11196 /* 11197 * If this was a retained enabling, decrement the dts_nretained count 11198 * and take it off of the dtrace_retained list. 11199 */ 11200 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11201 dtrace_retained == enab) { 11202 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11203 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11204 enab->dten_vstate->dtvs_state->dts_nretained--; 11205 } 11206 11207 if (enab->dten_prev == NULL) { 11208 if (dtrace_retained == enab) { 11209 dtrace_retained = enab->dten_next; 11210 11211 if (dtrace_retained != NULL) 11212 dtrace_retained->dten_prev = NULL; 11213 } 11214 } else { 11215 ASSERT(enab != dtrace_retained); 11216 ASSERT(dtrace_retained != NULL); 11217 enab->dten_prev->dten_next = enab->dten_next; 11218 } 11219 11220 if (enab->dten_next != NULL) { 11221 ASSERT(dtrace_retained != NULL); 11222 enab->dten_next->dten_prev = enab->dten_prev; 11223 } 11224 11225 kmem_free(enab, sizeof (dtrace_enabling_t)); 11226 } 11227 11228 static int 11229 dtrace_enabling_retain(dtrace_enabling_t *enab) 11230 { 11231 dtrace_state_t *state; 11232 11233 ASSERT(MUTEX_HELD(&dtrace_lock)); 11234 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11235 ASSERT(enab->dten_vstate != NULL); 11236 11237 state = enab->dten_vstate->dtvs_state; 11238 ASSERT(state != NULL); 11239 11240 /* 11241 * We only allow each state to retain dtrace_retain_max enablings. 
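 * (dtrace_retain_max is a tunable; the cap bounds the kernel memory that
 * retained enablings may pin on behalf of a single consumer.)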
11242 */ 11243 if (state->dts_nretained >= dtrace_retain_max) 11244 return (ENOSPC); 11245 11246 state->dts_nretained++; 11247 11248 if (dtrace_retained == NULL) { 11249 dtrace_retained = enab; 11250 return (0); 11251 } 11252 11253 enab->dten_next = dtrace_retained; 11254 dtrace_retained->dten_prev = enab; 11255 dtrace_retained = enab; 11256 11257 return (0); 11258 } 11259 11260 static int 11261 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11262 dtrace_probedesc_t *create) 11263 { 11264 dtrace_enabling_t *new, *enab; 11265 int found = 0, err = ENOENT; 11266 11267 ASSERT(MUTEX_HELD(&dtrace_lock)); 11268 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11269 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11270 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11271 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11272 11273 new = dtrace_enabling_create(&state->dts_vstate); 11274 11275 /* 11276 * Iterate over all retained enablings, looking for enablings that 11277 * match the specified state. 11278 */ 11279 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11280 int i; 11281 11282 /* 11283 * dtvs_state can only be NULL for helper enablings -- and 11284 * helper enablings can't be retained. 11285 */ 11286 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11287 11288 if (enab->dten_vstate->dtvs_state != state) 11289 continue; 11290 11291 /* 11292 * Now iterate over each probe description; we're looking for 11293 * an exact match to the specified probe description. 11294 */ 11295 for (i = 0; i < enab->dten_ndesc; i++) { 11296 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11297 dtrace_probedesc_t *pd = &ep->dted_probe; 11298 11299 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11300 continue; 11301 11302 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11303 continue; 11304 11305 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11306 continue; 11307 11308 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11309 continue; 11310 11311 /* 11312 * We have a winning probe! Add it to our growing 11313 * enabling. 11314 */ 11315 found = 1; 11316 dtrace_enabling_addlike(new, ep, create); 11317 } 11318 } 11319 11320 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11321 dtrace_enabling_destroy(new); 11322 return (err); 11323 } 11324 11325 return (0); 11326 } 11327 11328 static void 11329 dtrace_enabling_retract(dtrace_state_t *state) 11330 { 11331 dtrace_enabling_t *enab, *next; 11332 11333 ASSERT(MUTEX_HELD(&dtrace_lock)); 11334 11335 /* 11336 * Iterate over all retained enablings, destroy the enablings retained 11337 * for the specified state. 11338 */ 11339 for (enab = dtrace_retained; enab != NULL; enab = next) { 11340 next = enab->dten_next; 11341 11342 /* 11343 * dtvs_state can only be NULL for helper enablings -- and 11344 * helper enablings can't be retained. 
11345 */ 11346 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11347 11348 if (enab->dten_vstate->dtvs_state == state) { 11349 ASSERT(state->dts_nretained > 0); 11350 dtrace_enabling_destroy(enab); 11351 } 11352 } 11353 11354 ASSERT(state->dts_nretained == 0); 11355 } 11356 11357 static int 11358 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11359 { 11360 int i = 0; 11361 int matched = 0; 11362 11363 ASSERT(MUTEX_HELD(&cpu_lock)); 11364 ASSERT(MUTEX_HELD(&dtrace_lock)); 11365 11366 for (i = 0; i < enab->dten_ndesc; i++) { 11367 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11368 11369 enab->dten_current = ep; 11370 enab->dten_error = 0; 11371 11372 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11373 11374 if (enab->dten_error != 0) { 11375 /* 11376 * If we get an error half-way through enabling the 11377 * probes, we kick out -- perhaps with some number of 11378 * them enabled. Leaving enabled probes enabled may 11379 * be slightly confusing for user-level, but we expect 11380 * that no one will attempt to actually drive on in 11381 * the face of such errors. If this is an anonymous 11382 * enabling (indicated with a NULL nmatched pointer), 11383 * we cmn_err() a message. We aren't expecting to 11384 * get such an error -- insofar as it can exist at all, 11385 * it would be a result of corrupted DOF in the driver 11386 * properties. 11387 */ 11388 if (nmatched == NULL) { 11389 cmn_err(CE_WARN, "dtrace_enabling_match() " 11390 "error on %p: %d", (void *)ep, 11391 enab->dten_error); 11392 } 11393 11394 return (enab->dten_error); 11395 } 11396 } 11397 11398 enab->dten_probegen = dtrace_probegen; 11399 if (nmatched != NULL) 11400 *nmatched = matched; 11401 11402 return (0); 11403 } 11404 11405 static void 11406 dtrace_enabling_matchall(void) 11407 { 11408 dtrace_enabling_t *enab; 11409 11410 mutex_enter(&cpu_lock); 11411 mutex_enter(&dtrace_lock); 11412 11413 /* 11414 * Iterate over all retained enablings to see if any probes match 11415 * against them. We only perform this operation on enablings for which 11416 * we have sufficient permissions by virtue of being in the global zone 11417 * or in the same zone as the DTrace client. Because we can be called 11418 * after dtrace_detach() has been called, we cannot assert that there 11419 * are retained enablings. We can safely load from dtrace_retained, 11420 * however: the taskq_destroy() at the end of dtrace_detach() will 11421 * block pending our completion. 11422 */ 11423 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11424 #if defined(sun) 11425 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11426 11427 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11428 #endif 11429 (void) dtrace_enabling_match(enab, NULL); 11430 } 11431 11432 mutex_exit(&dtrace_lock); 11433 mutex_exit(&cpu_lock); 11434 } 11435 11436 /* 11437 * If an enabling is to be enabled without having matched probes (that is, if 11438 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11439 * enabling must be _primed_ by creating an ECB for every ECB description. 11440 * This must be done to assure that we know the number of speculations, the 11441 * number of aggregations, the minimum buffer size needed, etc. before we 11442 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11443 * enabling any probes, we create ECBs for every ECB description, but with a 11444 * NULL probe -- which is exactly what this function does.
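 * (Passing a NULL probe description to dtrace_probe_enable(), below, in
 * turn induces a call to dtrace_ecb_create_enable() with a NULL probe.)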
11445 */ 11446 static void 11447 dtrace_enabling_prime(dtrace_state_t *state) 11448 { 11449 dtrace_enabling_t *enab; 11450 int i; 11451 11452 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11453 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11454 11455 if (enab->dten_vstate->dtvs_state != state) 11456 continue; 11457 11458 /* 11459 * We don't want to prime an enabling more than once, lest 11460 * we allow a malicious user to induce resource exhaustion. 11461 * (The ECBs that result from priming an enabling aren't 11462 * leaked -- but they also aren't deallocated until the 11463 * consumer state is destroyed.) 11464 */ 11465 if (enab->dten_primed) 11466 continue; 11467 11468 for (i = 0; i < enab->dten_ndesc; i++) { 11469 enab->dten_current = enab->dten_desc[i]; 11470 (void) dtrace_probe_enable(NULL, enab); 11471 } 11472 11473 enab->dten_primed = 1; 11474 } 11475 } 11476 11477 /* 11478 * Called to indicate that probes should be provided due to retained 11479 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11480 * must take an initial lap through the enabling calling the dtps_provide() 11481 * entry point explicitly to allow for autocreated probes. 11482 */ 11483 static void 11484 dtrace_enabling_provide(dtrace_provider_t *prv) 11485 { 11486 int i, all = 0; 11487 dtrace_probedesc_t desc; 11488 11489 ASSERT(MUTEX_HELD(&dtrace_lock)); 11490 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11491 11492 if (prv == NULL) { 11493 all = 1; 11494 prv = dtrace_provider; 11495 } 11496 11497 do { 11498 dtrace_enabling_t *enab = dtrace_retained; 11499 void *parg = prv->dtpv_arg; 11500 11501 for (; enab != NULL; enab = enab->dten_next) { 11502 for (i = 0; i < enab->dten_ndesc; i++) { 11503 desc = enab->dten_desc[i]->dted_probe; 11504 mutex_exit(&dtrace_lock); 11505 prv->dtpv_pops.dtps_provide(parg, &desc); 11506 mutex_enter(&dtrace_lock); 11507 } 11508 } 11509 } while (all && (prv = prv->dtpv_next) != NULL); 11510 11511 mutex_exit(&dtrace_lock); 11512 dtrace_probe_provide(NULL, all ? NULL : prv); 11513 mutex_enter(&dtrace_lock); 11514 } 11515 11516 /* 11517 * DTrace DOF Functions 11518 */ 11519 /*ARGSUSED*/ 11520 static void 11521 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11522 { 11523 if (dtrace_err_verbose) 11524 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11525 11526 #ifdef DTRACE_ERRDEBUG 11527 dtrace_errdebug(str); 11528 #endif 11529 } 11530 11531 /* 11532 * Create DOF out of a currently enabled state. Right now, we only create 11533 * DOF containing the run-time options -- but this could be expanded to create 11534 * complete DOF representing the enabled state. 
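 * The resulting image is thus a header followed by a single
 * DOF_SECT_OPTDESC section holding DTRACEOPT_MAX option descriptions. As
 * an illustrative (and hypothetical) sketch, a consumer holding such DOF
 * could walk the options like so, where use() stands in for whatever is
 * to be done with each option:
 *
 *	dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + dof->dofh_secoff);
 *	dof_optdesc_t *opt = (dof_optdesc_t *)((uintptr_t)dof +
 *	    sec->dofs_offset);
 *	uint_t i;
 *
 *	for (i = 0; i < sec->dofs_size / sec->dofs_entsize; i++)
 *		use(opt[i].dofo_option, opt[i].dofo_value);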
11535 */ 11536 static dof_hdr_t * 11537 dtrace_dof_create(dtrace_state_t *state) 11538 { 11539 dof_hdr_t *dof; 11540 dof_sec_t *sec; 11541 dof_optdesc_t *opt; 11542 int i, len = sizeof (dof_hdr_t) + 11543 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11544 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11545 11546 ASSERT(MUTEX_HELD(&dtrace_lock)); 11547 11548 dof = kmem_zalloc(len, KM_SLEEP); 11549 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11550 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11551 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11552 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11553 11554 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11555 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11556 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11557 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11558 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11559 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11560 11561 dof->dofh_flags = 0; 11562 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11563 dof->dofh_secsize = sizeof (dof_sec_t); 11564 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11565 dof->dofh_secoff = sizeof (dof_hdr_t); 11566 dof->dofh_loadsz = len; 11567 dof->dofh_filesz = len; 11568 dof->dofh_pad = 0; 11569 11570 /* 11571 * Fill in the option section header... 11572 */ 11573 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11574 sec->dofs_type = DOF_SECT_OPTDESC; 11575 sec->dofs_align = sizeof (uint64_t); 11576 sec->dofs_flags = DOF_SECF_LOAD; 11577 sec->dofs_entsize = sizeof (dof_optdesc_t); 11578 11579 opt = (dof_optdesc_t *)((uintptr_t)sec + 11580 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11581 11582 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11583 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11584 11585 for (i = 0; i < DTRACEOPT_MAX; i++) { 11586 opt[i].dofo_option = i; 11587 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11588 opt[i].dofo_value = state->dts_options[i]; 11589 } 11590 11591 return (dof); 11592 } 11593 11594 static dof_hdr_t * 11595 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11596 { 11597 dof_hdr_t hdr, *dof; 11598 11599 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11600 11601 /* 11602 * First, we're going to copyin() the sizeof (dof_hdr_t). 11603 */ 11604 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11605 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11606 *errp = EFAULT; 11607 return (NULL); 11608 } 11609 11610 /* 11611 * Now we'll allocate the entire DOF and copy it in -- provided 11612 * that the length isn't outrageous. 
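 * Note that the header is fetched from userland twice -- once by itself
 * and again as part of the full buffer -- and userland may change it
 * between the two copyin() calls; nothing below should trust the header
 * beyond the dofh_loadsz bounds checked here, and a paranoid
 * implementation would re-verify dofh_loadsz in the second copy against
 * the first.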
11613 */ 11614 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11615 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11616 *errp = E2BIG; 11617 return (NULL); 11618 } 11619 11620 if (hdr.dofh_loadsz < sizeof (hdr)) { 11621 dtrace_dof_error(&hdr, "invalid load size"); 11622 *errp = EINVAL; 11623 return (NULL); 11624 } 11625 11626 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11627 11628 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11629 kmem_free(dof, hdr.dofh_loadsz); 11630 *errp = EFAULT; 11631 return (NULL); 11632 } 11633 11634 return (dof); 11635 } 11636 11637 #if !defined(sun) 11638 static __inline uchar_t 11639 dtrace_dof_char(char c) { 11640 switch (c) { 11641 case '0': 11642 case '1': 11643 case '2': 11644 case '3': 11645 case '4': 11646 case '5': 11647 case '6': 11648 case '7': 11649 case '8': 11650 case '9': 11651 return (c - '0'); 11652 case 'A': 11653 case 'B': 11654 case 'C': 11655 case 'D': 11656 case 'E': 11657 case 'F': 11658 return (c - 'A' + 10); 11659 case 'a': 11660 case 'b': 11661 case 'c': 11662 case 'd': 11663 case 'e': 11664 case 'f': 11665 return (c - 'a' + 10); 11666 } 11667 /* Should not reach here. */ 11668 return (0); 11669 } 11670 #endif 11671 11672 static dof_hdr_t * 11673 dtrace_dof_property(const char *name) 11674 { 11675 uchar_t *buf; 11676 uint64_t loadsz; 11677 unsigned int len, i; 11678 dof_hdr_t *dof; 11679 11680 #if defined(sun) 11681 /* 11682 * Unfortunately, array of values in .conf files are always (and 11683 * only) interpreted to be integer arrays. We must read our DOF 11684 * as an integer array, and then squeeze it into a byte array. 11685 */ 11686 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11687 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11688 return (NULL); 11689 11690 for (i = 0; i < len; i++) 11691 buf[i] = (uchar_t)(((int *)buf)[i]); 11692 11693 if (len < sizeof (dof_hdr_t)) { 11694 ddi_prop_free(buf); 11695 dtrace_dof_error(NULL, "truncated header"); 11696 return (NULL); 11697 } 11698 11699 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11700 ddi_prop_free(buf); 11701 dtrace_dof_error(NULL, "truncated DOF"); 11702 return (NULL); 11703 } 11704 11705 if (loadsz >= dtrace_dof_maxsize) { 11706 ddi_prop_free(buf); 11707 dtrace_dof_error(NULL, "oversized DOF"); 11708 return (NULL); 11709 } 11710 11711 dof = kmem_alloc(loadsz, KM_SLEEP); 11712 bcopy(buf, dof, loadsz); 11713 ddi_prop_free(buf); 11714 #else 11715 char *p; 11716 char *p_env; 11717 11718 if ((p_env = getenv(name)) == NULL) 11719 return (NULL); 11720 11721 len = strlen(p_env) / 2; 11722 11723 buf = kmem_alloc(len, KM_SLEEP); 11724 11725 dof = (dof_hdr_t *) buf; 11726 11727 p = p_env; 11728 11729 for (i = 0; i < len; i++) { 11730 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11731 dtrace_dof_char(p[1]); 11732 p += 2; 11733 } 11734 11735 freeenv(p_env); 11736 11737 if (len < sizeof (dof_hdr_t)) { 11738 kmem_free(buf, 0); 11739 dtrace_dof_error(NULL, "truncated header"); 11740 return (NULL); 11741 } 11742 11743 if (len < (loadsz = dof->dofh_loadsz)) { 11744 kmem_free(buf, 0); 11745 dtrace_dof_error(NULL, "truncated DOF"); 11746 return (NULL); 11747 } 11748 11749 if (loadsz >= dtrace_dof_maxsize) { 11750 kmem_free(buf, 0); 11751 dtrace_dof_error(NULL, "oversized DOF"); 11752 return (NULL); 11753 } 11754 #endif 11755 11756 return (dof); 11757 } 11758 11759 static void 11760 dtrace_dof_destroy(dof_hdr_t *dof) 11761 { 11762 kmem_free(dof, dof->dofh_loadsz); 11763 } 11764 11765 /* 11766 * Return the dof_sec_t pointer corresponding to a given section 
index. If the 11767 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11768 * a type other than DOF_SECT_NONE is specified, the header is checked against 11769 * this type and NULL is returned if the types do not match. 11770 */ 11771 static dof_sec_t * 11772 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11773 { 11774 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11775 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11776 11777 if (i >= dof->dofh_secnum) { 11778 dtrace_dof_error(dof, "referenced section index is invalid"); 11779 return (NULL); 11780 } 11781 11782 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11783 dtrace_dof_error(dof, "referenced section is not loadable"); 11784 return (NULL); 11785 } 11786 11787 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11788 dtrace_dof_error(dof, "referenced section is the wrong type"); 11789 return (NULL); 11790 } 11791 11792 return (sec); 11793 } 11794 11795 static dtrace_probedesc_t * 11796 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11797 { 11798 dof_probedesc_t *probe; 11799 dof_sec_t *strtab; 11800 uintptr_t daddr = (uintptr_t)dof; 11801 uintptr_t str; 11802 size_t size; 11803 11804 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11805 dtrace_dof_error(dof, "invalid probe section"); 11806 return (NULL); 11807 } 11808 11809 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11810 dtrace_dof_error(dof, "bad alignment in probe description"); 11811 return (NULL); 11812 } 11813 11814 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11815 dtrace_dof_error(dof, "truncated probe description"); 11816 return (NULL); 11817 } 11818 11819 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11820 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11821 11822 if (strtab == NULL) 11823 return (NULL); 11824 11825 str = daddr + strtab->dofs_offset; 11826 size = strtab->dofs_size; 11827 11828 if (probe->dofp_provider >= strtab->dofs_size) { 11829 dtrace_dof_error(dof, "corrupt probe provider"); 11830 return (NULL); 11831 } 11832 11833 (void) strncpy(desc->dtpd_provider, 11834 (char *)(str + probe->dofp_provider), 11835 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11836 11837 if (probe->dofp_mod >= strtab->dofs_size) { 11838 dtrace_dof_error(dof, "corrupt probe module"); 11839 return (NULL); 11840 } 11841 11842 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11843 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11844 11845 if (probe->dofp_func >= strtab->dofs_size) { 11846 dtrace_dof_error(dof, "corrupt probe function"); 11847 return (NULL); 11848 } 11849 11850 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11851 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11852 11853 if (probe->dofp_name >= strtab->dofs_size) { 11854 dtrace_dof_error(dof, "corrupt probe name"); 11855 return (NULL); 11856 } 11857 11858 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11859 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11860 11861 return (desc); 11862 } 11863 11864 static dtrace_difo_t * 11865 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11866 cred_t *cr) 11867 { 11868 dtrace_difo_t *dp; 11869 size_t ttl = 0; 11870 dof_difohdr_t *dofd; 11871 uintptr_t daddr = (uintptr_t)dof; 11872 size_t max = dtrace_difo_maxsize; 11873 int i, l, n; 11874 11875 static const struct { 11876 int section; 11877 int bufoffs; 11878 int lenoffs; 11879 int 
entsize; 11880 int align; 11881 const char *msg; 11882 } difo[] = { 11883 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11884 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11885 sizeof (dif_instr_t), "multiple DIF sections" }, 11886 11887 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11888 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11889 sizeof (uint64_t), "multiple integer tables" }, 11890 11891 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11892 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11893 sizeof (char), "multiple string tables" }, 11894 11895 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11896 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11897 sizeof (uint_t), "multiple variable tables" }, 11898 11899 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 11900 }; 11901 11902 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11903 dtrace_dof_error(dof, "invalid DIFO header section"); 11904 return (NULL); 11905 } 11906 11907 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11908 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11909 return (NULL); 11910 } 11911 11912 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11913 sec->dofs_size % sizeof (dof_secidx_t)) { 11914 dtrace_dof_error(dof, "bad size in DIFO header"); 11915 return (NULL); 11916 } 11917 11918 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11919 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11920 11921 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11922 dp->dtdo_rtype = dofd->dofd_rtype; 11923 11924 for (l = 0; l < n; l++) { 11925 dof_sec_t *subsec; 11926 void **bufp; 11927 uint32_t *lenp; 11928 11929 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11930 dofd->dofd_links[l])) == NULL) 11931 goto err; /* invalid section link */ 11932 11933 if (ttl + subsec->dofs_size > max) { 11934 dtrace_dof_error(dof, "exceeds maximum size"); 11935 goto err; 11936 } 11937 11938 ttl += subsec->dofs_size; 11939 11940 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11941 if (subsec->dofs_type != difo[i].section) 11942 continue; 11943 11944 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11945 dtrace_dof_error(dof, "section not loaded"); 11946 goto err; 11947 } 11948 11949 if (subsec->dofs_align != difo[i].align) { 11950 dtrace_dof_error(dof, "bad alignment"); 11951 goto err; 11952 } 11953 11954 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 11955 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 11956 11957 if (*bufp != NULL) { 11958 dtrace_dof_error(dof, difo[i].msg); 11959 goto err; 11960 } 11961 11962 if (difo[i].entsize != subsec->dofs_entsize) { 11963 dtrace_dof_error(dof, "entry size mismatch"); 11964 goto err; 11965 } 11966 11967 if (subsec->dofs_entsize != 0 && 11968 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 11969 dtrace_dof_error(dof, "corrupt entry size"); 11970 goto err; 11971 } 11972 11973 *lenp = subsec->dofs_size; 11974 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 11975 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 11976 *bufp, subsec->dofs_size); 11977 11978 if (subsec->dofs_entsize != 0) 11979 *lenp /= subsec->dofs_entsize; 11980 11981 break; 11982 } 11983 11984 /* 11985 * If we encounter a loadable DIFO sub-section that is not 11986 * known to us, assume this is a broken program and fail. 
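 * (Unknown subsections that are _not_ loadable are skipped instead; they
 * may, for example, carry auxiliary information that we have no need to
 * understand.)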
11987 */ 11988 if (difo[i].section == DOF_SECT_NONE && 11989 (subsec->dofs_flags & DOF_SECF_LOAD)) { 11990 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 11991 goto err; 11992 } 11993 } 11994 11995 if (dp->dtdo_buf == NULL) { 11996 /* 11997 * We can't have a DIF object without DIF text. 11998 */ 11999 dtrace_dof_error(dof, "missing DIF text"); 12000 goto err; 12001 } 12002 12003 /* 12004 * Before we validate the DIF object, run through the variable table 12005 * looking for the strings -- if any of their sizes are zero, we'll set 12006 * their size to be the system-wide default string size. Note that 12007 * this should _not_ happen if the "strsize" option has been set -- 12008 * in this case, the compiler should have set the size to reflect the 12009 * setting of the option. 12010 */ 12011 for (i = 0; i < dp->dtdo_varlen; i++) { 12012 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12013 dtrace_diftype_t *t = &v->dtdv_type; 12014 12015 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12016 continue; 12017 12018 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12019 t->dtdt_size = dtrace_strsize_default; 12020 } 12021 12022 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12023 goto err; 12024 12025 dtrace_difo_init(dp, vstate); 12026 return (dp); 12027 12028 err: 12029 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12030 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12031 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12032 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12033 12034 kmem_free(dp, sizeof (dtrace_difo_t)); 12035 return (NULL); 12036 } 12037 12038 static dtrace_predicate_t * 12039 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12040 cred_t *cr) 12041 { 12042 dtrace_difo_t *dp; 12043 12044 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12045 return (NULL); 12046 12047 return (dtrace_predicate_create(dp)); 12048 } 12049 12050 static dtrace_actdesc_t * 12051 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12052 cred_t *cr) 12053 { 12054 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12055 dof_actdesc_t *desc; 12056 dof_sec_t *difosec; 12057 size_t offs; 12058 uintptr_t daddr = (uintptr_t)dof; 12059 uint64_t arg; 12060 dtrace_actkind_t kind; 12061 12062 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12063 dtrace_dof_error(dof, "invalid action section"); 12064 return (NULL); 12065 } 12066 12067 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12068 dtrace_dof_error(dof, "truncated action description"); 12069 return (NULL); 12070 } 12071 12072 if (sec->dofs_align != sizeof (uint64_t)) { 12073 dtrace_dof_error(dof, "bad alignment in action description"); 12074 return (NULL); 12075 } 12076 12077 if (sec->dofs_size < sec->dofs_entsize) { 12078 dtrace_dof_error(dof, "section entry size exceeds total size"); 12079 return (NULL); 12080 } 12081 12082 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12083 dtrace_dof_error(dof, "bad entry size in action description"); 12084 return (NULL); 12085 } 12086 12087 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12088 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12089 return (NULL); 12090 } 12091 12092 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12093 desc = (dof_actdesc_t *)(daddr + 12094 (uintptr_t)sec->dofs_offset + offs); 12095 kind = (dtrace_actkind_t)desc->dofa_kind; 12096 12097 if (DTRACEACT_ISPRINTFLIKE(kind) && 12098 (kind != 
DTRACEACT_PRINTA || 12099 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12100 dof_sec_t *strtab; 12101 char *str, *fmt; 12102 uint64_t i; 12103 12104 /* 12105 * printf()-like actions must have a format string. 12106 */ 12107 if ((strtab = dtrace_dof_sect(dof, 12108 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12109 goto err; 12110 12111 str = (char *)((uintptr_t)dof + 12112 (uintptr_t)strtab->dofs_offset); 12113 12114 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12115 if (str[i] == '\0') 12116 break; 12117 } 12118 12119 if (i >= strtab->dofs_size) { 12120 dtrace_dof_error(dof, "bogus format string"); 12121 goto err; 12122 } 12123 12124 if (i == desc->dofa_arg) { 12125 dtrace_dof_error(dof, "empty format string"); 12126 goto err; 12127 } 12128 12129 i -= desc->dofa_arg; 12130 fmt = kmem_alloc(i + 1, KM_SLEEP); 12131 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12132 arg = (uint64_t)(uintptr_t)fmt; 12133 } else { 12134 if (kind == DTRACEACT_PRINTA) { 12135 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12136 arg = 0; 12137 } else { 12138 arg = desc->dofa_arg; 12139 } 12140 } 12141 12142 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12143 desc->dofa_uarg, arg); 12144 12145 if (last != NULL) { 12146 last->dtad_next = act; 12147 } else { 12148 first = act; 12149 } 12150 12151 last = act; 12152 12153 if (desc->dofa_difo == DOF_SECIDX_NONE) 12154 continue; 12155 12156 if ((difosec = dtrace_dof_sect(dof, 12157 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12158 goto err; 12159 12160 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12161 12162 if (act->dtad_difo == NULL) 12163 goto err; 12164 } 12165 12166 ASSERT(first != NULL); 12167 return (first); 12168 12169 err: 12170 for (act = first; act != NULL; act = next) { 12171 next = act->dtad_next; 12172 dtrace_actdesc_release(act, vstate); 12173 } 12174 12175 return (NULL); 12176 } 12177 12178 static dtrace_ecbdesc_t * 12179 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12180 cred_t *cr) 12181 { 12182 dtrace_ecbdesc_t *ep; 12183 dof_ecbdesc_t *ecb; 12184 dtrace_probedesc_t *desc; 12185 dtrace_predicate_t *pred = NULL; 12186 12187 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12188 dtrace_dof_error(dof, "truncated ECB description"); 12189 return (NULL); 12190 } 12191 12192 if (sec->dofs_align != sizeof (uint64_t)) { 12193 dtrace_dof_error(dof, "bad alignment in ECB description"); 12194 return (NULL); 12195 } 12196 12197 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12198 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12199 12200 if (sec == NULL) 12201 return (NULL); 12202 12203 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12204 ep->dted_uarg = ecb->dofe_uarg; 12205 desc = &ep->dted_probe; 12206 12207 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12208 goto err; 12209 12210 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12211 if ((sec = dtrace_dof_sect(dof, 12212 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12213 goto err; 12214 12215 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12216 goto err; 12217 12218 ep->dted_pred.dtpdd_predicate = pred; 12219 } 12220 12221 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12222 if ((sec = dtrace_dof_sect(dof, 12223 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12224 goto err; 12225 12226 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12227 12228 if (ep->dted_action == NULL) 12229 goto err; 12230 } 12231 12232 return (ep); 12233 12234 err: 12235 if (pred != NULL) 12236 
dtrace_predicate_release(pred, vstate); 12237 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12238 return (NULL); 12239 } 12240 12241 /* 12242 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12243 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12244 * site of any user SETX relocations to account for load object base address. 12245 * In the future, if we need other relocations, this function can be extended. 12246 */ 12247 static int 12248 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12249 { 12250 uintptr_t daddr = (uintptr_t)dof; 12251 dof_relohdr_t *dofr = 12252 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12253 dof_sec_t *ss, *rs, *ts; 12254 dof_relodesc_t *r; 12255 uint_t i, n; 12256 12257 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12258 sec->dofs_align != sizeof (dof_secidx_t)) { 12259 dtrace_dof_error(dof, "invalid relocation header"); 12260 return (-1); 12261 } 12262 12263 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12264 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12265 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12266 12267 if (ss == NULL || rs == NULL || ts == NULL) 12268 return (-1); /* dtrace_dof_error() has been called already */ 12269 12270 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12271 rs->dofs_align != sizeof (uint64_t)) { 12272 dtrace_dof_error(dof, "invalid relocation section"); 12273 return (-1); 12274 } 12275 12276 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12277 n = rs->dofs_size / rs->dofs_entsize; 12278 12279 for (i = 0; i < n; i++) { 12280 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12281 12282 switch (r->dofr_type) { 12283 case DOF_RELO_NONE: 12284 break; 12285 case DOF_RELO_SETX: 12286 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12287 sizeof (uint64_t) > ts->dofs_size) { 12288 dtrace_dof_error(dof, "bad relocation offset"); 12289 return (-1); 12290 } 12291 12292 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12293 dtrace_dof_error(dof, "misaligned setx relo"); 12294 return (-1); 12295 } 12296 12297 *(uint64_t *)taddr += ubase; 12298 break; 12299 default: 12300 dtrace_dof_error(dof, "invalid relocation type"); 12301 return (-1); 12302 } 12303 12304 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12305 } 12306 12307 return (0); 12308 } 12309 12310 /* 12311 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12312 * header: it should be at the front of a memory region that is at least 12313 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12314 * size. It need not be validated in any other way. 12315 */ 12316 static int 12317 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12318 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12319 { 12320 uint64_t len = dof->dofh_loadsz, seclen; 12321 uintptr_t daddr = (uintptr_t)dof; 12322 dtrace_ecbdesc_t *ep; 12323 dtrace_enabling_t *enab; 12324 uint_t i; 12325 12326 ASSERT(MUTEX_HELD(&dtrace_lock)); 12327 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12328 12329 /* 12330 * Check the DOF header identification bytes. In addition to checking 12331 * valid settings, we also verify that unused bits/bytes are zeroed so 12332 * we can use them later without fear of regressing existing binaries. 
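 *
 * As a reader's aid, here is a sketch of the ident layout that the
 * checks below assume (see <sys/dtrace.h> for the authoritative
 * definitions):
 *
 *	dofh_ident[0-3]		DOF_MAG_STRING magic bytes
 *	dofh_ident[4]		data model (DOF_MODEL_ILP32 or _LP64)
 *	dofh_ident[5]		encoding (must be DOF_ENCODE_NATIVE)
 *	dofh_ident[6]		DOF version (DOF_VERSION_1 or _2)
 *	dofh_ident[7]		DIF instruction set version
 *	dofh_ident[8]		DIF integer register count
 *	dofh_ident[9]		DIF tuple register count
 *	dofh_ident[10-15]	pad bytes (must be zero)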
12333 */ 12334 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12335 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12336 dtrace_dof_error(dof, "DOF magic string mismatch"); 12337 return (-1); 12338 } 12339 12340 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12341 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12342 dtrace_dof_error(dof, "DOF has invalid data model"); 12343 return (-1); 12344 } 12345 12346 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12347 dtrace_dof_error(dof, "DOF encoding mismatch"); 12348 return (-1); 12349 } 12350 12351 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12352 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12353 dtrace_dof_error(dof, "DOF version mismatch"); 12354 return (-1); 12355 } 12356 12357 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12358 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12359 return (-1); 12360 } 12361 12362 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12363 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12364 return (-1); 12365 } 12366 12367 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12368 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12369 return (-1); 12370 } 12371 12372 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12373 if (dof->dofh_ident[i] != 0) { 12374 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12375 return (-1); 12376 } 12377 } 12378 12379 if (dof->dofh_flags & ~DOF_FL_VALID) { 12380 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12381 return (-1); 12382 } 12383 12384 if (dof->dofh_secsize == 0) { 12385 dtrace_dof_error(dof, "zero section header size"); 12386 return (-1); 12387 } 12388 12389 /* 12390 * Check that the section headers don't exceed the amount of DOF 12391 * data. Note that we cast the section size and number of sections 12392 * to uint64_t's to prevent possible overflow in the multiplication. 12393 */ 12394 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12395 12396 if (dof->dofh_secoff > len || seclen > len || 12397 dof->dofh_secoff + seclen > len) { 12398 dtrace_dof_error(dof, "truncated section headers"); 12399 return (-1); 12400 } 12401 12402 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12403 dtrace_dof_error(dof, "misaligned section headers"); 12404 return (-1); 12405 } 12406 12407 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12408 dtrace_dof_error(dof, "misaligned section size"); 12409 return (-1); 12410 } 12411 12412 /* 12413 * Take an initial pass through the section headers to be sure that 12414 * the headers don't have stray offsets. If the 'noprobes' flag is 12415 * set, do not permit sections relating to providers, probes, or args. 
12416 */ 12417 for (i = 0; i < dof->dofh_secnum; i++) { 12418 dof_sec_t *sec = (dof_sec_t *)(daddr + 12419 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12420 12421 if (noprobes) { 12422 switch (sec->dofs_type) { 12423 case DOF_SECT_PROVIDER: 12424 case DOF_SECT_PROBES: 12425 case DOF_SECT_PRARGS: 12426 case DOF_SECT_PROFFS: 12427 dtrace_dof_error(dof, "illegal sections " 12428 "for enabling"); 12429 return (-1); 12430 } 12431 } 12432 12433 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12434 continue; /* just ignore non-loadable sections */ 12435 12436 if (sec->dofs_align & (sec->dofs_align - 1)) { 12437 dtrace_dof_error(dof, "bad section alignment"); 12438 return (-1); 12439 } 12440 12441 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12442 dtrace_dof_error(dof, "misaligned section"); 12443 return (-1); 12444 } 12445 12446 if (sec->dofs_offset > len || sec->dofs_size > len || 12447 sec->dofs_offset + sec->dofs_size > len) { 12448 dtrace_dof_error(dof, "corrupt section header"); 12449 return (-1); 12450 } 12451 12452 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12453 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12454 dtrace_dof_error(dof, "non-terminating string table"); 12455 return (-1); 12456 } 12457 } 12458 12459 /* 12460 * Take a second pass through the sections and locate and perform any 12461 * relocations that are present. We do this after the first pass to 12462 * be sure that all sections have had their headers validated. 12463 */ 12464 for (i = 0; i < dof->dofh_secnum; i++) { 12465 dof_sec_t *sec = (dof_sec_t *)(daddr + 12466 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12467 12468 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12469 continue; /* skip sections that are not loadable */ 12470 12471 switch (sec->dofs_type) { 12472 case DOF_SECT_URELHDR: 12473 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12474 return (-1); 12475 break; 12476 } 12477 } 12478 12479 if ((enab = *enabp) == NULL) 12480 enab = *enabp = dtrace_enabling_create(vstate); 12481 12482 for (i = 0; i < dof->dofh_secnum; i++) { 12483 dof_sec_t *sec = (dof_sec_t *)(daddr + 12484 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12485 12486 if (sec->dofs_type != DOF_SECT_ECBDESC) 12487 continue; 12488 12489 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12490 dtrace_enabling_destroy(enab); 12491 *enabp = NULL; 12492 return (-1); 12493 } 12494 12495 dtrace_enabling_add(enab, ep); 12496 } 12497 12498 return (0); 12499 } 12500 12501 /* 12502 * Process DOF for any options. This routine assumes that the DOF has been 12503 * at least processed by dtrace_dof_slurp(). 
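 *
 * As an illustrative sketch (not from the original source), a consumer
 * would populate one entry of a DOF_SECT_OPTDESC section like so:
 *
 *	dof_optdesc_t opt;
 *
 *	opt.dofo_option = DTRACEOPT_BUFSIZE;	-- option to set
 *	opt.dofo_strtab = DOF_SECIDX_NONE;	-- no string value
 *	opt.dofo_value = 1024 * 1024;		-- one megabyte
 *
 * The loop below rejects any entry that references a string table or
 * that carries DTRACEOPT_UNSET, and hands everything else to
 * dtrace_state_option().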
12504 */ 12505 static int 12506 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12507 { 12508 int i, rval; 12509 uint32_t entsize; 12510 size_t offs; 12511 dof_optdesc_t *desc; 12512 12513 for (i = 0; i < dof->dofh_secnum; i++) { 12514 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12515 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12516 12517 if (sec->dofs_type != DOF_SECT_OPTDESC) 12518 continue; 12519 12520 if (sec->dofs_align != sizeof (uint64_t)) { 12521 dtrace_dof_error(dof, "bad alignment in " 12522 "option description"); 12523 return (EINVAL); 12524 } 12525 12526 if ((entsize = sec->dofs_entsize) == 0) { 12527 dtrace_dof_error(dof, "zeroed option entry size"); 12528 return (EINVAL); 12529 } 12530 12531 if (entsize < sizeof (dof_optdesc_t)) { 12532 dtrace_dof_error(dof, "bad option entry size"); 12533 return (EINVAL); 12534 } 12535 12536 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12537 desc = (dof_optdesc_t *)((uintptr_t)dof + 12538 (uintptr_t)sec->dofs_offset + offs); 12539 12540 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12541 dtrace_dof_error(dof, "non-zero option string"); 12542 return (EINVAL); 12543 } 12544 12545 if (desc->dofo_value == DTRACEOPT_UNSET) { 12546 dtrace_dof_error(dof, "unset option"); 12547 return (EINVAL); 12548 } 12549 12550 if ((rval = dtrace_state_option(state, 12551 desc->dofo_option, desc->dofo_value)) != 0) { 12552 dtrace_dof_error(dof, "rejected option"); 12553 return (rval); 12554 } 12555 } 12556 } 12557 12558 return (0); 12559 } 12560 12561 /* 12562 * DTrace Consumer State Functions 12563 */ 12564 static int 12565 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12566 { 12567 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12568 void *base; 12569 uintptr_t limit; 12570 dtrace_dynvar_t *dvar, *next, *start; 12571 int i; 12572 12573 ASSERT(MUTEX_HELD(&dtrace_lock)); 12574 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12575 12576 bzero(dstate, sizeof (dtrace_dstate_t)); 12577 12578 if ((dstate->dtds_chunksize = chunksize) == 0) 12579 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12580 12581 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12582 size = min; 12583 12584 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12585 return (ENOMEM); 12586 12587 dstate->dtds_size = size; 12588 dstate->dtds_base = base; 12589 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12590 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12591 12592 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12593 12594 if (hashsize != 1 && (hashsize & 1)) 12595 hashsize--; 12596 12597 dstate->dtds_hashsize = hashsize; 12598 dstate->dtds_hash = dstate->dtds_base; 12599 12600 /* 12601 * Set all of our hash buckets to point to the single sink, and (if 12602 * it hasn't already been set), set the sink's hash value to be the 12603 * sink sentinel value. The sink is needed for dynamic variable 12604 * lookups to know that they have iterated over an entire, valid hash 12605 * chain. 12606 */ 12607 for (i = 0; i < hashsize; i++) 12608 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12609 12610 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12611 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12612 12613 /* 12614 * Determine number of active CPUs. Divide free list evenly among 12615 * active CPUs. 
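 *
 * A sketch of the resulting layout of the dynamic variable space (the
 * hash table was placed at the base of the region above; sizes here are
 * illustrative only):
 *
 *	base                                             base + size
 *	+------------------+-----------+-----------+-----+-----------+
 *	|    dtds_hash[]   |   CPU 0   |   CPU 1   | ... |  CPU n-1  |
 *	| (hashsize slots) | free list | free list |     | free list |
 *	+------------------+-----------+-----------+-----+-----------+
 *
 * Each per-CPU region is 'maxper' bytes (rounded down to a multiple of
 * the chunk size), carved into dtds_chunksize chunks that are strung
 * together into that CPU's free list below.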
12616 */ 12617 start = (dtrace_dynvar_t *) 12618 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12619 limit = (uintptr_t)base + size; 12620 12621 maxper = (limit - (uintptr_t)start) / NCPU; 12622 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12623 12624 for (i = 0; i < NCPU; i++) { 12625 #if !defined(sun) 12626 if (CPU_ABSENT(i)) 12627 continue; 12628 #endif 12629 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12630 12631 /* 12632 * If we don't even have enough chunks to make it once through 12633 * NCPUs, we're just going to allocate everything to the first 12634 * CPU. And if we're on the last CPU, we're going to allocate 12635 * whatever is left over. In either case, we set the limit to 12636 * be the limit of the dynamic variable space. 12637 */ 12638 if (maxper == 0 || i == NCPU - 1) { 12639 limit = (uintptr_t)base + size; 12640 start = NULL; 12641 } else { 12642 limit = (uintptr_t)start + maxper; 12643 start = (dtrace_dynvar_t *)limit; 12644 } 12645 12646 ASSERT(limit <= (uintptr_t)base + size); 12647 12648 for (;;) { 12649 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12650 dstate->dtds_chunksize); 12651 12652 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12653 break; 12654 12655 dvar->dtdv_next = next; 12656 dvar = next; 12657 } 12658 12659 if (maxper == 0) 12660 break; 12661 } 12662 12663 return (0); 12664 } 12665 12666 static void 12667 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12668 { 12669 ASSERT(MUTEX_HELD(&cpu_lock)); 12670 12671 if (dstate->dtds_base == NULL) 12672 return; 12673 12674 kmem_free(dstate->dtds_base, dstate->dtds_size); 12675 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12676 } 12677 12678 static void 12679 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12680 { 12681 /* 12682 * Logical XOR, where are you? 12683 */ 12684 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12685 12686 if (vstate->dtvs_nglobals > 0) { 12687 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12688 sizeof (dtrace_statvar_t *)); 12689 } 12690 12691 if (vstate->dtvs_ntlocals > 0) { 12692 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12693 sizeof (dtrace_difv_t)); 12694 } 12695 12696 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12697 12698 if (vstate->dtvs_nlocals > 0) { 12699 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12700 sizeof (dtrace_statvar_t *)); 12701 } 12702 } 12703 12704 #if defined(sun) 12705 static void 12706 dtrace_state_clean(dtrace_state_t *state) 12707 { 12708 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12709 return; 12710 12711 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12712 dtrace_speculation_clean(state); 12713 } 12714 12715 static void 12716 dtrace_state_deadman(dtrace_state_t *state) 12717 { 12718 hrtime_t now; 12719 12720 dtrace_sync(); 12721 12722 now = dtrace_gethrtime(); 12723 12724 if (state != dtrace_anon.dta_state && 12725 now - state->dts_laststatus >= dtrace_deadman_user) 12726 return; 12727 12728 /* 12729 * We must be sure that dts_alive never appears to be less than the 12730 * value upon entry to dtrace_state_deadman(), and because we lack a 12731 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12732 * store INT64_MAX to it, followed by a memory barrier, followed by 12733 * the new value. This assures that dts_alive never appears to be 12734 * less than its true value, regardless of the order in which the 12735 * stores to the underlying storage are issued. 
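 *
 * Schematically, the store sequence that follows is (a sketch):
 *
 *	dts_alive = INT64_MAX;		-- park at the maximum
 *	dtrace_membar_producer();	-- order the two stores
 *	dts_alive = now;		-- publish the new timestamp
 *
 * A concurrent reader therefore observes the old value, INT64_MAX, or
 * the new value -- never a value less than the one at entry.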
12736 */ 12737 state->dts_alive = INT64_MAX; 12738 dtrace_membar_producer(); 12739 state->dts_alive = now; 12740 } 12741 #else 12742 static void 12743 dtrace_state_clean(void *arg) 12744 { 12745 dtrace_state_t *state = arg; 12746 dtrace_optval_t *opt = state->dts_options; 12747 12748 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12749 return; 12750 12751 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12752 dtrace_speculation_clean(state); 12753 12754 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 12755 dtrace_state_clean, state); 12756 } 12757 12758 static void 12759 dtrace_state_deadman(void *arg) 12760 { 12761 dtrace_state_t *state = arg; 12762 hrtime_t now; 12763 12764 dtrace_sync(); 12765 12766 dtrace_debug_output(); 12767 12768 now = dtrace_gethrtime(); 12769 12770 if (state != dtrace_anon.dta_state && 12771 now - state->dts_laststatus >= dtrace_deadman_user) 12772 return; 12773 12774 /* 12775 * We must be sure that dts_alive never appears to be less than the 12776 * value upon entry to dtrace_state_deadman(), and because we lack a 12777 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12778 * store INT64_MAX to it, followed by a memory barrier, followed by 12779 * the new value. This assures that dts_alive never appears to be 12780 * less than its true value, regardless of the order in which the 12781 * stores to the underlying storage are issued. 12782 */ 12783 state->dts_alive = INT64_MAX; 12784 dtrace_membar_producer(); 12785 state->dts_alive = now; 12786 12787 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 12788 dtrace_state_deadman, state); 12789 } 12790 #endif 12791 12792 static dtrace_state_t * 12793 #if defined(sun) 12794 dtrace_state_create(dev_t *devp, cred_t *cr) 12795 #else 12796 dtrace_state_create(struct cdev *dev) 12797 #endif 12798 { 12799 #if defined(sun) 12800 minor_t minor; 12801 major_t major; 12802 #else 12803 cred_t *cr = NULL; 12804 int m = 0; 12805 #endif 12806 char c[30]; 12807 dtrace_state_t *state; 12808 dtrace_optval_t *opt; 12809 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12810 12811 ASSERT(MUTEX_HELD(&dtrace_lock)); 12812 ASSERT(MUTEX_HELD(&cpu_lock)); 12813 12814 #if defined(sun) 12815 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12816 VM_BESTFIT | VM_SLEEP); 12817 12818 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12819 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12820 return (NULL); 12821 } 12822 12823 state = ddi_get_soft_state(dtrace_softstate, minor); 12824 #else 12825 if (dev != NULL) { 12826 cr = dev->si_cred; 12827 m = dev2unit(dev); 12828 } 12829 12830 /* Allocate memory for the state. */ 12831 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12832 #endif 12833 12834 state->dts_epid = DTRACE_EPIDNONE + 1; 12835 12836 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12837 #if defined(sun) 12838 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12839 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12840 12841 if (devp != NULL) { 12842 major = getemajor(*devp); 12843 } else { 12844 major = ddi_driver_major(dtrace_devi); 12845 } 12846 12847 state->dts_dev = makedevice(major, minor); 12848 12849 if (devp != NULL) 12850 *devp = state->dts_dev; 12851 #else 12852 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 12853 state->dts_dev = dev; 12854 #endif 12855 12856 /* 12857 * We allocate NCPU buffers. 
On the one hand, this can be quite 12858 * a bit of memory per instance (nearly 36K on a Starcat). On the 12859 * other hand, it saves an additional memory reference in the probe 12860 * path. 12861 */ 12862 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12863 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12864 12865 #if defined(sun) 12866 state->dts_cleaner = CYCLIC_NONE; 12867 state->dts_deadman = CYCLIC_NONE; 12868 #else 12869 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 12870 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 12871 #endif 12872 state->dts_vstate.dtvs_state = state; 12873 12874 for (i = 0; i < DTRACEOPT_MAX; i++) 12875 state->dts_options[i] = DTRACEOPT_UNSET; 12876 12877 /* 12878 * Set the default options. 12879 */ 12880 opt = state->dts_options; 12881 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12882 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12883 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12884 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12885 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12886 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12887 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12888 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12889 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12890 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12891 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12892 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12893 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12894 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12895 12896 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12897 12898 /* 12899 * Depending on the user credentials, we set flag bits which alter probe 12900 * visibility or the amount of destructiveness allowed. In the case of 12901 * actual anonymous tracing, or the possession of all privileges, all of 12902 * the normal checks are bypassed. 12903 */ 12904 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12905 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12906 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12907 } else { 12908 /* 12909 * Set up the credentials for this instantiation. We take a 12910 * hold on the credential to prevent it from disappearing on 12911 * us; this in turn prevents the zone_t referenced by this 12912 * credential from disappearing. This means that we can 12913 * examine the credential and the zone from probe context. 12914 */ 12915 crhold(cr); 12916 state->dts_cred.dcr_cred = cr; 12917 12918 /* 12919 * CRA_PROC means "we have *some* privilege for dtrace" and 12920 * unlocks the use of variables like pid, zonename, etc. 12921 */ 12922 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12923 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12924 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12925 } 12926 12927 /* 12928 * dtrace_user allows use of syscall and profile providers. 12929 * If the user also has proc_owner and/or proc_zone, we 12930 * extend the scope to include additional visibility and 12931 * destructive power. 
12932 */ 12933 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12934 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12935 state->dts_cred.dcr_visible |= 12936 DTRACE_CRV_ALLPROC; 12937 12938 state->dts_cred.dcr_action |= 12939 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12940 } 12941 12942 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12943 state->dts_cred.dcr_visible |= 12944 DTRACE_CRV_ALLZONE; 12945 12946 state->dts_cred.dcr_action |= 12947 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12948 } 12949 12950 /* 12951 * If we have all privs in whatever zone this is, 12952 * we can do destructive things to processes which 12953 * have altered credentials. 12954 */ 12955 #if defined(sun) 12956 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12957 cr->cr_zone->zone_privset)) { 12958 state->dts_cred.dcr_action |= 12959 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12960 } 12961 #endif 12962 } 12963 12964 /* 12965 * Holding the dtrace_kernel privilege also implies that 12966 * the user has the dtrace_user privilege from a visibility 12967 * perspective. But without further privileges, some 12968 * destructive actions are not available. 12969 */ 12970 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12971 /* 12972 * Make all probes in all zones visible. However, 12973 * this doesn't mean that all actions become available 12974 * to all zones. 12975 */ 12976 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12977 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12978 12979 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12980 DTRACE_CRA_PROC; 12981 /* 12982 * Holding proc_owner means that destructive actions 12983 * for *this* zone are allowed. 12984 */ 12985 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12986 state->dts_cred.dcr_action |= 12987 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12988 12989 /* 12990 * Holding proc_zone means that destructive actions 12991 * for this user/group ID in all zones are allowed. 12992 */ 12993 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12994 state->dts_cred.dcr_action |= 12995 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12996 12997 #if defined(sun) 12998 /* 12999 * If we have all privs in whatever zone this is, 13000 * we can do destructive things to processes which 13001 * have altered credentials. 13002 */ 13003 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13004 cr->cr_zone->zone_privset)) { 13005 state->dts_cred.dcr_action |= 13006 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13007 } 13008 #endif 13009 } 13010 13011 /* 13012 * Holding the dtrace_proc privilege gives control over fasttrap 13013 * and pid providers. We need to grant wider destructive 13014 * privileges in the event that the user has proc_owner and/or 13015 * proc_zone.
13016 */ 13017 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13018 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13019 state->dts_cred.dcr_action |= 13020 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13021 13022 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13023 state->dts_cred.dcr_action |= 13024 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13025 } 13026 } 13027 13028 return (state); 13029 } 13030 13031 static int 13032 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13033 { 13034 dtrace_optval_t *opt = state->dts_options, size; 13035 processorid_t cpu = 0; 13036 int flags = 0, rval; 13037 13038 ASSERT(MUTEX_HELD(&dtrace_lock)); 13039 ASSERT(MUTEX_HELD(&cpu_lock)); 13040 ASSERT(which < DTRACEOPT_MAX); 13041 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13042 (state == dtrace_anon.dta_state && 13043 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13044 13045 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13046 return (0); 13047 13048 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13049 cpu = opt[DTRACEOPT_CPU]; 13050 13051 if (which == DTRACEOPT_SPECSIZE) 13052 flags |= DTRACEBUF_NOSWITCH; 13053 13054 if (which == DTRACEOPT_BUFSIZE) { 13055 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13056 flags |= DTRACEBUF_RING; 13057 13058 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13059 flags |= DTRACEBUF_FILL; 13060 13061 if (state != dtrace_anon.dta_state || 13062 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13063 flags |= DTRACEBUF_INACTIVE; 13064 } 13065 13066 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13067 /* 13068 * The size must be 8-byte aligned. If the size is not 8-byte 13069 * aligned, drop it down by the difference. 13070 */ 13071 if (size & (sizeof (uint64_t) - 1)) 13072 size -= size & (sizeof (uint64_t) - 1); 13073 13074 if (size < state->dts_reserve) { 13075 /* 13076 * Buffers must always be large enough to accommodate 13077 * their prereserved space. We return E2BIG instead 13078 * of ENOMEM in this case to allow user-level 13079 * software to differentiate the cases. 13080 */ 13081 return (E2BIG); 13082 } 13083 13084 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13085 13086 if (rval != ENOMEM) { 13087 opt[which] = size; 13088 return (rval); 13089 } 13090 13091 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13092 return (rval); 13093 } 13094 13095 return (ENOMEM); 13096 } 13097 13098 static int 13099 dtrace_state_buffers(dtrace_state_t *state) 13100 { 13101 dtrace_speculation_t *spec = state->dts_speculations; 13102 int rval, i; 13103 13104 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13105 DTRACEOPT_BUFSIZE)) != 0) 13106 return (rval); 13107 13108 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13109 DTRACEOPT_AGGSIZE)) != 0) 13110 return (rval); 13111 13112 for (i = 0; i < state->dts_nspeculations; i++) { 13113 if ((rval = dtrace_state_buffer(state, 13114 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13115 return (rval); 13116 } 13117 13118 return (0); 13119 } 13120 13121 static void 13122 dtrace_state_prereserve(dtrace_state_t *state) 13123 { 13124 dtrace_ecb_t *ecb; 13125 dtrace_probe_t *probe; 13126 13127 state->dts_reserve = 0; 13128 13129 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13130 return; 13131 13132 /* 13133 * If our buffer policy is a "fill" buffer policy, we need to set the 13134 * prereserved space to be the space required by the END probes.
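 *
 * For example (illustrative numbers only): with two END-probe ECBs
 * belonging to this state, needing 32 and 48 bytes with 8-byte
 * alignment each, the loop below computes a reserve of
 * (32 + 8) + (48 + 8) = 96 bytes.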
13135 */ 13136 probe = dtrace_probes[dtrace_probeid_end - 1]; 13137 ASSERT(probe != NULL); 13138 13139 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13140 if (ecb->dte_state != state) 13141 continue; 13142 13143 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13144 } 13145 } 13146 13147 static int 13148 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13149 { 13150 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13151 dtrace_speculation_t *spec; 13152 dtrace_buffer_t *buf; 13153 #if defined(sun) 13154 cyc_handler_t hdlr; 13155 cyc_time_t when; 13156 #endif 13157 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13158 dtrace_icookie_t cookie; 13159 13160 mutex_enter(&cpu_lock); 13161 mutex_enter(&dtrace_lock); 13162 13163 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13164 rval = EBUSY; 13165 goto out; 13166 } 13167 13168 /* 13169 * Before we can perform any checks, we must prime all of the 13170 * retained enablings that correspond to this state. 13171 */ 13172 dtrace_enabling_prime(state); 13173 13174 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13175 rval = EACCES; 13176 goto out; 13177 } 13178 13179 dtrace_state_prereserve(state); 13180 13181 /* 13182 * Now what we want to do is try to allocate our speculations. 13183 * We do not automatically resize the number of speculations; if 13184 * this fails, we will fail the operation. 13185 */ 13186 nspec = opt[DTRACEOPT_NSPEC]; 13187 ASSERT(nspec != DTRACEOPT_UNSET); 13188 13189 if (nspec > INT_MAX) { 13190 rval = ENOMEM; 13191 goto out; 13192 } 13193 13194 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13195 13196 if (spec == NULL) { 13197 rval = ENOMEM; 13198 goto out; 13199 } 13200 13201 state->dts_speculations = spec; 13202 state->dts_nspeculations = (int)nspec; 13203 13204 for (i = 0; i < nspec; i++) { 13205 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13206 rval = ENOMEM; 13207 goto err; 13208 } 13209 13210 spec[i].dtsp_buffer = buf; 13211 } 13212 13213 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13214 if (dtrace_anon.dta_state == NULL) { 13215 rval = ENOENT; 13216 goto out; 13217 } 13218 13219 if (state->dts_necbs != 0) { 13220 rval = EALREADY; 13221 goto out; 13222 } 13223 13224 state->dts_anon = dtrace_anon_grab(); 13225 ASSERT(state->dts_anon != NULL); 13226 state = state->dts_anon; 13227 13228 /* 13229 * We want "grabanon" to be set in the grabbed state, so we'll 13230 * copy that option value from the grabbing state into the 13231 * grabbed state. 13232 */ 13233 state->dts_options[DTRACEOPT_GRABANON] = 13234 opt[DTRACEOPT_GRABANON]; 13235 13236 *cpu = dtrace_anon.dta_beganon; 13237 13238 /* 13239 * If the anonymous state is active (as it almost certainly 13240 * is if the anonymous enabling ultimately matched anything), 13241 * we don't allow any further option processing -- but we 13242 * don't return failure. 13243 */ 13244 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13245 goto out; 13246 } 13247 13248 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13249 opt[DTRACEOPT_AGGSIZE] != 0) { 13250 if (state->dts_aggregations == NULL) { 13251 /* 13252 * We're not going to create an aggregation buffer 13253 * because we don't have any ECBs that contain 13254 * aggregations -- set this option to 0. 13255 */ 13256 opt[DTRACEOPT_AGGSIZE] = 0; 13257 } else { 13258 /* 13259 * If we have an aggregation buffer, we must also have 13260 * a buffer to use as scratch.
13261 */ 13262 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13263 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13264 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13265 } 13266 } 13267 } 13268 13269 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13270 opt[DTRACEOPT_SPECSIZE] != 0) { 13271 if (!state->dts_speculates) { 13272 /* 13273 * We're not going to create speculation buffers 13274 * because we don't have any ECBs that actually 13275 * speculate -- set the speculation size to 0. 13276 */ 13277 opt[DTRACEOPT_SPECSIZE] = 0; 13278 } 13279 } 13280 13281 /* 13282 * The bare minimum size for any buffer that we're actually going to 13283 * do anything to is sizeof (uint64_t). 13284 */ 13285 sz = sizeof (uint64_t); 13286 13287 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13288 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13289 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13290 /* 13291 * A buffer size has been explicitly set to 0 (or to a size 13292 * that will be adjusted to 0) and we need the space -- we 13293 * need to return failure. We return ENOSPC to differentiate 13294 * it from failing to allocate a buffer due to failure to meet 13295 * the reserve (for which we return E2BIG). 13296 */ 13297 rval = ENOSPC; 13298 goto out; 13299 } 13300 13301 if ((rval = dtrace_state_buffers(state)) != 0) 13302 goto err; 13303 13304 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13305 sz = dtrace_dstate_defsize; 13306 13307 do { 13308 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13309 13310 if (rval == 0) 13311 break; 13312 13313 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13314 goto err; 13315 } while (sz >>= 1); 13316 13317 opt[DTRACEOPT_DYNVARSIZE] = sz; 13318 13319 if (rval != 0) 13320 goto err; 13321 13322 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13323 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13324 13325 if (opt[DTRACEOPT_CLEANRATE] == 0) 13326 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13327 13328 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13329 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13330 13331 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13332 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13333 13334 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13335 #if defined(sun) 13336 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13337 hdlr.cyh_arg = state; 13338 hdlr.cyh_level = CY_LOW_LEVEL; 13339 13340 when.cyt_when = 0; 13341 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13342 13343 state->dts_cleaner = cyclic_add(&hdlr, &when); 13344 13345 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13346 hdlr.cyh_arg = state; 13347 hdlr.cyh_level = CY_LOW_LEVEL; 13348 13349 when.cyt_when = 0; 13350 when.cyt_interval = dtrace_deadman_interval; 13351 13352 state->dts_deadman = cyclic_add(&hdlr, &when); 13353 #else 13354 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13355 dtrace_state_clean, state); 13356 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13357 dtrace_state_deadman, state); 13358 #endif 13359 13360 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13361 13362 /* 13363 * Now it's time to actually fire the BEGIN probe. We need to disable 13364 * interrupts here both to record the CPU on which we fired the BEGIN 13365 * probe (the data from this CPU will be processed first at user 13366 * level) and to manually activate the buffer for this CPU. 
13367 */ 13368 cookie = dtrace_interrupt_disable(); 13369 *cpu = curcpu; 13370 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13371 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13372 13373 dtrace_probe(dtrace_probeid_begin, 13374 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13375 dtrace_interrupt_enable(cookie); 13376 /* 13377 * We may have had an exit action from a BEGIN probe; only change our 13378 * state to ACTIVE if we're still in WARMUP. 13379 */ 13380 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13381 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13382 13383 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13384 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13385 13386 /* 13387 * Regardless of whether we're now in ACTIVE or DRAINING, we 13388 * want each CPU to transition its principal buffer out of the 13389 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13390 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13391 * atomically transition from processing none of a state's ECBs to 13392 * processing all of them. 13393 */ 13394 dtrace_xcall(DTRACE_CPUALL, 13395 (dtrace_xcall_t)dtrace_buffer_activate, state); 13396 goto out; 13397 13398 err: 13399 dtrace_buffer_free(state->dts_buffer); 13400 dtrace_buffer_free(state->dts_aggbuffer); 13401 13402 if ((nspec = state->dts_nspeculations) == 0) { 13403 ASSERT(state->dts_speculations == NULL); 13404 goto out; 13405 } 13406 13407 spec = state->dts_speculations; 13408 ASSERT(spec != NULL); 13409 13410 for (i = 0; i < state->dts_nspeculations; i++) { 13411 if ((buf = spec[i].dtsp_buffer) == NULL) 13412 break; 13413 13414 dtrace_buffer_free(buf); 13415 kmem_free(buf, bufsize); 13416 } 13417 13418 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13419 state->dts_nspeculations = 0; 13420 state->dts_speculations = NULL; 13421 13422 out: 13423 mutex_exit(&dtrace_lock); 13424 mutex_exit(&cpu_lock); 13425 13426 return (rval); 13427 } 13428 13429 static int 13430 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13431 { 13432 dtrace_icookie_t cookie; 13433 13434 ASSERT(MUTEX_HELD(&dtrace_lock)); 13435 13436 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13437 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13438 return (EINVAL); 13439 13440 /* 13441 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13442 * to be sure that every CPU has seen it. See below for the details 13443 * on why this is done. 13444 */ 13445 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13446 dtrace_sync(); 13447 13448 /* 13449 * By this point, it is impossible for any CPU to be still processing 13450 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13451 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13452 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13453 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13454 * iff we're in the END probe. 13455 */ 13456 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13457 dtrace_sync(); 13458 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13459 13460 /* 13461 * Finally, we can release the reserve and call the END probe. We 13462 * disable interrupts across calling the END probe to allow us to 13463 * return the CPU on which we actually called the END probe. This 13464 * allows user-land to be sure that this CPU's principal buffer is 13465 * processed last.
13466 */ 13467 state->dts_reserve = 0; 13468 13469 cookie = dtrace_interrupt_disable(); 13470 *cpu = curcpu; 13471 dtrace_probe(dtrace_probeid_end, 13472 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13473 dtrace_interrupt_enable(cookie); 13474 13475 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13476 dtrace_sync(); 13477 13478 return (0); 13479 } 13480 13481 static int 13482 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13483 dtrace_optval_t val) 13484 { 13485 ASSERT(MUTEX_HELD(&dtrace_lock)); 13486 13487 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13488 return (EBUSY); 13489 13490 if (option >= DTRACEOPT_MAX) 13491 return (EINVAL); 13492 13493 if (option != DTRACEOPT_CPU && val < 0) 13494 return (EINVAL); 13495 13496 switch (option) { 13497 case DTRACEOPT_DESTRUCTIVE: 13498 if (dtrace_destructive_disallow) 13499 return (EACCES); 13500 13501 state->dts_cred.dcr_destructive = 1; 13502 break; 13503 13504 case DTRACEOPT_BUFSIZE: 13505 case DTRACEOPT_DYNVARSIZE: 13506 case DTRACEOPT_AGGSIZE: 13507 case DTRACEOPT_SPECSIZE: 13508 case DTRACEOPT_STRSIZE: 13509 if (val < 0) 13510 return (EINVAL); 13511 13512 if (val >= LONG_MAX) { 13513 /* 13514 * If this is an otherwise negative value, set it to 13515 * the highest multiple of 128m less than LONG_MAX. 13516 * Technically, we're adjusting the size without 13517 * regard to the buffer resizing policy, but in fact, 13518 * this has no effect -- if we set the buffer size to 13519 * ~LONG_MAX and the buffer policy is ultimately set to 13520 * be "manual", the buffer allocation is guaranteed to 13521 * fail, if only because the allocation requires two 13522 * buffers. (We set the size to the highest 13523 * multiple of 128m because it ensures that the size 13524 * will remain a multiple of a megabyte when 13525 * repeatedly halved -- all the way down to 15m.) 13526 */ 13527 val = LONG_MAX - (1 << 27) + 1; 13528 } 13529 } 13530 13531 state->dts_options[option] = val; 13532 13533 return (0); 13534 } 13535 13536 static void 13537 dtrace_state_destroy(dtrace_state_t *state) 13538 { 13539 dtrace_ecb_t *ecb; 13540 dtrace_vstate_t *vstate = &state->dts_vstate; 13541 #if defined(sun) 13542 minor_t minor = getminor(state->dts_dev); 13543 #endif 13544 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13545 dtrace_speculation_t *spec = state->dts_speculations; 13546 int nspec = state->dts_nspeculations; 13547 uint32_t match; 13548 13549 ASSERT(MUTEX_HELD(&dtrace_lock)); 13550 ASSERT(MUTEX_HELD(&cpu_lock)); 13551 13552 /* 13553 * First, retract any retained enablings for this state. 13554 */ 13555 dtrace_enabling_retract(state); 13556 ASSERT(state->dts_nretained == 0); 13557 13558 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13559 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13560 /* 13561 * We have managed to come into dtrace_state_destroy() on a 13562 * hot enabling -- almost certainly because of a disorderly 13563 * shutdown of a consumer. (That is, a consumer that is 13564 * exiting without having called dtrace_stop().) In this case, 13565 * we're going to set our activity to be KILLED, and then 13566 * issue a sync to be sure that everyone is out of probe 13567 * context before we start blowing away ECBs. 13568 */ 13569 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13570 dtrace_sync(); 13571 } 13572 13573 /* 13574 * Release the credential hold we took in dtrace_state_create().
13575 */ 13576 if (state->dts_cred.dcr_cred != NULL) 13577 crfree(state->dts_cred.dcr_cred); 13578 13579 /* 13580 * Now we can safely disable and destroy any enabled probes. Because 13581 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13582 * (especially if they're all enabled), we take two passes through the 13583 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13584 * in the second we disable whatever is left over. 13585 */ 13586 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13587 for (i = 0; i < state->dts_necbs; i++) { 13588 if ((ecb = state->dts_ecbs[i]) == NULL) 13589 continue; 13590 13591 if (match && ecb->dte_probe != NULL) { 13592 dtrace_probe_t *probe = ecb->dte_probe; 13593 dtrace_provider_t *prov = probe->dtpr_provider; 13594 13595 if (!(prov->dtpv_priv.dtpp_flags & match)) 13596 continue; 13597 } 13598 13599 dtrace_ecb_disable(ecb); 13600 dtrace_ecb_destroy(ecb); 13601 } 13602 13603 if (!match) 13604 break; 13605 } 13606 13607 /* 13608 * Before we free the buffers, perform one more sync to assure that 13609 * every CPU is out of probe context. 13610 */ 13611 dtrace_sync(); 13612 13613 dtrace_buffer_free(state->dts_buffer); 13614 dtrace_buffer_free(state->dts_aggbuffer); 13615 13616 for (i = 0; i < nspec; i++) 13617 dtrace_buffer_free(spec[i].dtsp_buffer); 13618 13619 #if defined(sun) 13620 if (state->dts_cleaner != CYCLIC_NONE) 13621 cyclic_remove(state->dts_cleaner); 13622 13623 if (state->dts_deadman != CYCLIC_NONE) 13624 cyclic_remove(state->dts_deadman); 13625 #else 13626 callout_stop(&state->dts_cleaner); 13627 callout_drain(&state->dts_cleaner); 13628 callout_stop(&state->dts_deadman); 13629 callout_drain(&state->dts_deadman); 13630 #endif 13631 13632 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13633 dtrace_vstate_fini(vstate); 13634 if (state->dts_ecbs != NULL) 13635 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13636 13637 if (state->dts_aggregations != NULL) { 13638 #ifdef DEBUG 13639 for (i = 0; i < state->dts_naggregations; i++) 13640 ASSERT(state->dts_aggregations[i] == NULL); 13641 #endif 13642 ASSERT(state->dts_naggregations > 0); 13643 kmem_free(state->dts_aggregations, 13644 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13645 } 13646 13647 kmem_free(state->dts_buffer, bufsize); 13648 kmem_free(state->dts_aggbuffer, bufsize); 13649 13650 for (i = 0; i < nspec; i++) 13651 kmem_free(spec[i].dtsp_buffer, bufsize); 13652 13653 if (spec != NULL) 13654 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13655 13656 dtrace_format_destroy(state); 13657 13658 if (state->dts_aggid_arena != NULL) { 13659 #if defined(sun) 13660 vmem_destroy(state->dts_aggid_arena); 13661 #else 13662 delete_unrhdr(state->dts_aggid_arena); 13663 #endif 13664 state->dts_aggid_arena = NULL; 13665 } 13666 #if defined(sun) 13667 ddi_soft_state_free(dtrace_softstate, minor); 13668 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13669 #endif 13670 } 13671 13672 /* 13673 * DTrace Anonymous Enabling Functions 13674 */ 13675 static dtrace_state_t * 13676 dtrace_anon_grab(void) 13677 { 13678 dtrace_state_t *state; 13679 13680 ASSERT(MUTEX_HELD(&dtrace_lock)); 13681 13682 if ((state = dtrace_anon.dta_state) == NULL) { 13683 ASSERT(dtrace_anon.dta_enabling == NULL); 13684 return (NULL); 13685 } 13686 13687 ASSERT(dtrace_anon.dta_enabling != NULL); 13688 ASSERT(dtrace_retained != NULL); 13689 13690 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13691 dtrace_anon.dta_enabling = NULL; 13692 
dtrace_anon.dta_state = NULL; 13693 13694 return (state); 13695 } 13696 13697 static void 13698 dtrace_anon_property(void) 13699 { 13700 int i, rv; 13701 dtrace_state_t *state; 13702 dof_hdr_t *dof; 13703 char c[32]; /* enough for "dof-data-" + digits */ 13704 13705 ASSERT(MUTEX_HELD(&dtrace_lock)); 13706 ASSERT(MUTEX_HELD(&cpu_lock)); 13707 13708 for (i = 0; ; i++) { 13709 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13710 13711 dtrace_err_verbose = 1; 13712 13713 if ((dof = dtrace_dof_property(c)) == NULL) { 13714 dtrace_err_verbose = 0; 13715 break; 13716 } 13717 13718 #if defined(sun) 13719 /* 13720 * We want to create anonymous state, so we need to transition 13721 * the kernel debugger to indicate that DTrace is active. If 13722 * this fails (e.g. because the debugger has modified text in 13723 * some way), we won't continue with the processing. 13724 */ 13725 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13726 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13727 "enabling ignored."); 13728 dtrace_dof_destroy(dof); 13729 break; 13730 } 13731 #endif 13732 13733 /* 13734 * If we haven't allocated an anonymous state, we'll do so now. 13735 */ 13736 if ((state = dtrace_anon.dta_state) == NULL) { 13737 #if defined(sun) 13738 state = dtrace_state_create(NULL, NULL); 13739 #else 13740 state = dtrace_state_create(NULL); 13741 #endif 13742 dtrace_anon.dta_state = state; 13743 13744 if (state == NULL) { 13745 /* 13746 * This basically shouldn't happen: the only 13747 * failure mode from dtrace_state_create() is a 13748 * failure of ddi_soft_state_zalloc() that 13749 * itself should never happen. Still, the 13750 * interface allows for a failure mode, and 13751 * we want to fail as gracefully as possible: 13752 * we'll emit an error message and cease 13753 * processing anonymous state in this case. 13754 */ 13755 cmn_err(CE_WARN, "failed to create " 13756 "anonymous state"); 13757 dtrace_dof_destroy(dof); 13758 break; 13759 } 13760 } 13761 13762 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13763 &dtrace_anon.dta_enabling, 0, B_TRUE); 13764 13765 if (rv == 0) 13766 rv = dtrace_dof_options(dof, state); 13767 13768 dtrace_err_verbose = 0; 13769 dtrace_dof_destroy(dof); 13770 13771 if (rv != 0) { 13772 /* 13773 * This is malformed DOF; chuck any anonymous state 13774 * that we created. 13775 */ 13776 ASSERT(dtrace_anon.dta_enabling == NULL); 13777 dtrace_state_destroy(state); 13778 dtrace_anon.dta_state = NULL; 13779 break; 13780 } 13781 13782 ASSERT(dtrace_anon.dta_enabling != NULL); 13783 } 13784 13785 if (dtrace_anon.dta_enabling != NULL) { 13786 int rval; 13787 13788 /* 13789 * dtrace_enabling_retain() can only fail because we are 13790 * trying to retain more enablings than are allowed -- but 13791 * we only have one anonymous enabling, and we are guaranteed 13792 * to be allowed at least one retained enabling; we assert 13793 * that dtrace_enabling_retain() returns success. 
13794 */ 13795 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13796 ASSERT(rval == 0); 13797 13798 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13799 } 13800 } 13801 13802 #if defined(sun) 13803 /* 13804 * DTrace Helper Functions 13805 */ 13806 static void 13807 dtrace_helper_trace(dtrace_helper_action_t *helper, 13808 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13809 { 13810 uint32_t size, next, nnext, i; 13811 dtrace_helptrace_t *ent; 13812 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 13813 13814 if (!dtrace_helptrace_enabled) 13815 return; 13816 13817 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13818 13819 /* 13820 * What would a tracing framework be without its own tracing 13821 * framework? (Well, a hell of a lot simpler, for starters...) 13822 */ 13823 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13824 sizeof (uint64_t) - sizeof (uint64_t); 13825 13826 /* 13827 * Iterate until we can allocate a slot in the trace buffer. 13828 */ 13829 do { 13830 next = dtrace_helptrace_next; 13831 13832 if (next + size < dtrace_helptrace_bufsize) { 13833 nnext = next + size; 13834 } else { 13835 nnext = size; 13836 } 13837 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13838 13839 /* 13840 * We have our slot; fill it in. 13841 */ 13842 if (nnext == size) 13843 next = 0; 13844 13845 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13846 ent->dtht_helper = helper; 13847 ent->dtht_where = where; 13848 ent->dtht_nlocals = vstate->dtvs_nlocals; 13849 13850 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13851 mstate->dtms_fltoffs : -1; 13852 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13853 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 13854 13855 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13856 dtrace_statvar_t *svar; 13857 13858 if ((svar = vstate->dtvs_locals[i]) == NULL) 13859 continue; 13860 13861 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13862 ent->dtht_locals[i] = 13863 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 13864 } 13865 } 13866 #endif 13867 13868 #if defined(sun) 13869 static uint64_t 13870 dtrace_helper(int which, dtrace_mstate_t *mstate, 13871 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13872 { 13873 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 13874 uint64_t sarg0 = mstate->dtms_arg[0]; 13875 uint64_t sarg1 = mstate->dtms_arg[1]; 13876 uint64_t rval; 13877 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13878 dtrace_helper_action_t *helper; 13879 dtrace_vstate_t *vstate; 13880 dtrace_difo_t *pred; 13881 int i, trace = dtrace_helptrace_enabled; 13882 13883 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13884 13885 if (helpers == NULL) 13886 return (0); 13887 13888 if ((helper = helpers->dthps_actions[which]) == NULL) 13889 return (0); 13890 13891 vstate = &helpers->dthps_vstate; 13892 mstate->dtms_arg[0] = arg0; 13893 mstate->dtms_arg[1] = arg1; 13894 13895 /* 13896 * Now iterate over each helper. If its predicate evaluates to 'true', 13897 * we'll call the corresponding actions. Note that the below calls 13898 * to dtrace_dif_emulate() may set faults in machine state. This is 13899 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13900 * the stored DIF offset with its own (which is the desired behavior). 13901 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13902 * from machine state; this is okay, too. 
13903 */ 13904 for (; helper != NULL; helper = helper->dtha_next) { 13905 if ((pred = helper->dtha_predicate) != NULL) { 13906 if (trace) 13907 dtrace_helper_trace(helper, mstate, vstate, 0); 13908 13909 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13910 goto next; 13911 13912 if (*flags & CPU_DTRACE_FAULT) 13913 goto err; 13914 } 13915 13916 for (i = 0; i < helper->dtha_nactions; i++) { 13917 if (trace) 13918 dtrace_helper_trace(helper, 13919 mstate, vstate, i + 1); 13920 13921 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13922 mstate, vstate, state); 13923 13924 if (*flags & CPU_DTRACE_FAULT) 13925 goto err; 13926 } 13927 13928 next: 13929 if (trace) 13930 dtrace_helper_trace(helper, mstate, vstate, 13931 DTRACE_HELPTRACE_NEXT); 13932 } 13933 13934 if (trace) 13935 dtrace_helper_trace(helper, mstate, vstate, 13936 DTRACE_HELPTRACE_DONE); 13937 13938 /* 13939 * Restore the args that we saved upon entry. 13940 */ 13941 mstate->dtms_arg[0] = sarg0; 13942 mstate->dtms_arg[1] = sarg1; 13943 13944 return (rval); 13945 13946 err: 13947 if (trace) 13948 dtrace_helper_trace(helper, mstate, vstate, 13949 DTRACE_HELPTRACE_ERR); 13950 13951 /* 13952 * Restore the args that we saved upon entry. 13953 */ 13954 mstate->dtms_arg[0] = sarg0; 13955 mstate->dtms_arg[1] = sarg1; 13956 13957 return (0); 13958 } 13959 13960 static void 13961 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13962 dtrace_vstate_t *vstate) 13963 { 13964 int i; 13965 13966 if (helper->dtha_predicate != NULL) 13967 dtrace_difo_release(helper->dtha_predicate, vstate); 13968 13969 for (i = 0; i < helper->dtha_nactions; i++) { 13970 ASSERT(helper->dtha_actions[i] != NULL); 13971 dtrace_difo_release(helper->dtha_actions[i], vstate); 13972 } 13973 13974 kmem_free(helper->dtha_actions, 13975 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13976 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13977 } 13978 13979 static int 13980 dtrace_helper_destroygen(int gen) 13981 { 13982 proc_t *p = curproc; 13983 dtrace_helpers_t *help = p->p_dtrace_helpers; 13984 dtrace_vstate_t *vstate; 13985 int i; 13986 13987 ASSERT(MUTEX_HELD(&dtrace_lock)); 13988 13989 if (help == NULL || gen > help->dthps_generation) 13990 return (EINVAL); 13991 13992 vstate = &help->dthps_vstate; 13993 13994 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13995 dtrace_helper_action_t *last = NULL, *h, *next; 13996 13997 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13998 next = h->dtha_next; 13999 14000 if (h->dtha_generation == gen) { 14001 if (last != NULL) { 14002 last->dtha_next = next; 14003 } else { 14004 help->dthps_actions[i] = next; 14005 } 14006 14007 dtrace_helper_action_destroy(h, vstate); 14008 } else { 14009 last = h; 14010 } 14011 } 14012 } 14013 14014 /* 14015 * Iterate until we've cleared out all helper providers with the 14016 * given generation number. 14017 */ 14018 for (;;) { 14019 dtrace_helper_provider_t *prov; 14020 14021 /* 14022 * Look for a helper provider with the right generation. We 14023 * have to start back at the beginning of the list each time 14024 * because we drop dtrace_lock. It's unlikely that we'll make 14025 * more than two passes. 14026 */ 14027 for (i = 0; i < help->dthps_nprovs; i++) { 14028 prov = help->dthps_provs[i]; 14029 14030 if (prov->dthp_generation == gen) 14031 break; 14032 } 14033 14034 /* 14035 * If there were no matches, we're done. 14036 */ 14037 if (i == help->dthps_nprovs) 14038 break; 14039 14040 /* 14041 * Move the last helper provider into this slot.
14042 */ 14043 help->dthps_nprovs--; 14044 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14045 help->dthps_provs[help->dthps_nprovs] = NULL; 14046 14047 mutex_exit(&dtrace_lock); 14048 14049 /* 14050 * If we have a meta provider, remove this helper provider. 14051 */ 14052 mutex_enter(&dtrace_meta_lock); 14053 if (dtrace_meta_pid != NULL) { 14054 ASSERT(dtrace_deferred_pid == NULL); 14055 dtrace_helper_provider_remove(&prov->dthp_prov, 14056 p->p_pid); 14057 } 14058 mutex_exit(&dtrace_meta_lock); 14059 14060 dtrace_helper_provider_destroy(prov); 14061 14062 mutex_enter(&dtrace_lock); 14063 } 14064 14065 return (0); 14066 } 14067 #endif 14068 14069 #if defined(sun) 14070 static int 14071 dtrace_helper_validate(dtrace_helper_action_t *helper) 14072 { 14073 int err = 0, i; 14074 dtrace_difo_t *dp; 14075 14076 if ((dp = helper->dtha_predicate) != NULL) 14077 err += dtrace_difo_validate_helper(dp); 14078 14079 for (i = 0; i < helper->dtha_nactions; i++) 14080 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14081 14082 return (err == 0); 14083 } 14084 #endif 14085 14086 #if defined(sun) 14087 static int 14088 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14089 { 14090 dtrace_helpers_t *help; 14091 dtrace_helper_action_t *helper, *last; 14092 dtrace_actdesc_t *act; 14093 dtrace_vstate_t *vstate; 14094 dtrace_predicate_t *pred; 14095 int count = 0, nactions = 0, i; 14096 14097 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14098 return (EINVAL); 14099 14100 help = curproc->p_dtrace_helpers; 14101 last = help->dthps_actions[which]; 14102 vstate = &help->dthps_vstate; 14103 14104 for (count = 0; last != NULL; last = last->dtha_next) { 14105 count++; 14106 if (last->dtha_next == NULL) 14107 break; 14108 } 14109 14110 /* 14111 * If we already have dtrace_helper_actions_max helper actions for this 14112 * helper action type, we'll refuse to add a new one. 
*/ 14114 if (count >= dtrace_helper_actions_max) 14115 return (ENOSPC); 14116 14117 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14118 helper->dtha_generation = help->dthps_generation; 14119 14120 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14121 ASSERT(pred->dtp_difo != NULL); 14122 dtrace_difo_hold(pred->dtp_difo); 14123 helper->dtha_predicate = pred->dtp_difo; 14124 } 14125 14126 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14127 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14128 goto err; 14129 14130 if (act->dtad_difo == NULL) 14131 goto err; 14132 14133 nactions++; 14134 } 14135 14136 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14137 (helper->dtha_nactions = nactions), KM_SLEEP); 14138 14139 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14140 dtrace_difo_hold(act->dtad_difo); 14141 helper->dtha_actions[i++] = act->dtad_difo; 14142 } 14143 14144 if (!dtrace_helper_validate(helper)) 14145 goto err; 14146 14147 if (last == NULL) { 14148 help->dthps_actions[which] = helper; 14149 } else { 14150 last->dtha_next = helper; 14151 } 14152 14153 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14154 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14155 dtrace_helptrace_next = 0; 14156 } 14157 14158 return (0); 14159 err: 14160 dtrace_helper_action_destroy(helper, vstate); 14161 return (EINVAL); 14162 } 14163 14164 static void 14165 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14166 dof_helper_t *dofhp) 14167 { 14168 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14169 14170 mutex_enter(&dtrace_meta_lock); 14171 mutex_enter(&dtrace_lock); 14172 14173 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14174 /* 14175 * If the dtrace module is loaded but not attached, or if 14176 * there isn't a meta provider registered to deal with 14177 * these provider descriptions, we need to postpone creating 14178 * the actual providers until later. 14179 */ 14180 14181 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14182 dtrace_deferred_pid != help) { 14183 help->dthps_deferred = 1; 14184 help->dthps_pid = p->p_pid; 14185 help->dthps_next = dtrace_deferred_pid; 14186 help->dthps_prev = NULL; 14187 if (dtrace_deferred_pid != NULL) 14188 dtrace_deferred_pid->dthps_prev = help; 14189 dtrace_deferred_pid = help; 14190 } 14191 14192 mutex_exit(&dtrace_lock); 14193 14194 } else if (dofhp != NULL) { 14195 /* 14196 * If the dtrace module is loaded and we have a particular 14197 * helper provider description, pass that off to the 14198 * meta provider. 14199 */ 14200 14201 mutex_exit(&dtrace_lock); 14202 14203 dtrace_helper_provide(dofhp, p->p_pid); 14204 14205 } else { 14206 /* 14207 * Otherwise, just pass all the helper provider descriptions 14208 * off to the meta provider. 14209 */ 14210 14211 int i; 14212 mutex_exit(&dtrace_lock); 14213 14214 for (i = 0; i < help->dthps_nprovs; i++) { 14215 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14216 p->p_pid); 14217 } 14218 } 14219 14220 mutex_exit(&dtrace_meta_lock); 14221 } 14222 14223 static int 14224 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14225 { 14226 dtrace_helpers_t *help; 14227 dtrace_helper_provider_t *hprov, **tmp_provs; 14228 uint_t tmp_maxprovs, i; 14229 14230 ASSERT(MUTEX_HELD(&dtrace_lock)); 14231 14232 help = curproc->p_dtrace_helpers; 14233 ASSERT(help != NULL); 14234 14235 /* 14236 * If we already have dtrace_helper_providers_max helper providers, 14237 * we refuse to add a new one.
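 * (dtrace_helper_providers_max, like dtrace_helper_actions_max above,
 * is a limit on a per-process resource; both are ordinary global
 * variables and may be tuned as described at the top of this file.)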
14238 */ 14239 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14240 return (ENOSPC); 14241 14242 /* 14243 * Check to make sure this isn't a duplicate. 14244 */ 14245 for (i = 0; i < help->dthps_nprovs; i++) { 14246 if (dofhp->dofhp_addr == 14247 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14248 return (EALREADY); 14249 } 14250 14251 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14252 hprov->dthp_prov = *dofhp; 14253 hprov->dthp_ref = 1; 14254 hprov->dthp_generation = gen; 14255 14256 /* 14257 * Allocate a bigger table for helper providers if it's already full. 14258 */ 14259 if (help->dthps_maxprovs == help->dthps_nprovs) { 14260 tmp_maxprovs = help->dthps_maxprovs; 14261 tmp_provs = help->dthps_provs; 14262 14263 if (help->dthps_maxprovs == 0) 14264 help->dthps_maxprovs = 2; 14265 else 14266 help->dthps_maxprovs *= 2; 14267 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14268 help->dthps_maxprovs = dtrace_helper_providers_max; 14269 14270 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14271 14272 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14273 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14274 14275 if (tmp_provs != NULL) { 14276 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14277 sizeof (dtrace_helper_provider_t *)); 14278 kmem_free(tmp_provs, tmp_maxprovs * 14279 sizeof (dtrace_helper_provider_t *)); 14280 } 14281 } 14282 14283 help->dthps_provs[help->dthps_nprovs] = hprov; 14284 help->dthps_nprovs++; 14285 14286 return (0); 14287 } 14288 14289 static void 14290 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14291 { 14292 mutex_enter(&dtrace_lock); 14293 14294 if (--hprov->dthp_ref == 0) { 14295 dof_hdr_t *dof; 14296 mutex_exit(&dtrace_lock); 14297 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14298 dtrace_dof_destroy(dof); 14299 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14300 } else { 14301 mutex_exit(&dtrace_lock); 14302 } 14303 } 14304 14305 static int 14306 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14307 { 14308 uintptr_t daddr = (uintptr_t)dof; 14309 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14310 dof_provider_t *provider; 14311 dof_probe_t *probe; 14312 uint8_t *arg; 14313 char *strtab, *typestr; 14314 dof_stridx_t typeidx; 14315 size_t typesz; 14316 uint_t nprobes, j, k; 14317 14318 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14319 14320 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14321 dtrace_dof_error(dof, "misaligned section offset"); 14322 return (-1); 14323 } 14324 14325 /* 14326 * The section needs to be large enough to contain the DOF provider 14327 * structure appropriate for the given version. 14328 */ 14329 if (sec->dofs_size < 14330 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14331 offsetof(dof_provider_t, dofpv_prenoffs) : 14332 sizeof (dof_provider_t))) { 14333 dtrace_dof_error(dof, "provider section too small"); 14334 return (-1); 14335 } 14336 14337 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14338 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14339 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14340 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14341 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14342 14343 if (str_sec == NULL || prb_sec == NULL || 14344 arg_sec == NULL || off_sec == NULL) 14345 return (-1); 14346 14347 enoff_sec = NULL; 14348 14349 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14350 provider->dofpv_prenoffs != DOF_SECT_NONE && 14351 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14352 provider->dofpv_prenoffs)) == NULL) 14353 return (-1); 14354 14355 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14356 14357 if (provider->dofpv_name >= str_sec->dofs_size || 14358 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14359 dtrace_dof_error(dof, "invalid provider name"); 14360 return (-1); 14361 } 14362 14363 if (prb_sec->dofs_entsize == 0 || 14364 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14365 dtrace_dof_error(dof, "invalid entry size"); 14366 return (-1); 14367 } 14368 14369 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14370 dtrace_dof_error(dof, "misaligned entry size"); 14371 return (-1); 14372 } 14373 14374 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14375 dtrace_dof_error(dof, "invalid entry size"); 14376 return (-1); 14377 } 14378 14379 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14380 dtrace_dof_error(dof, "misaligned section offset"); 14381 return (-1); 14382 } 14383 14384 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14385 dtrace_dof_error(dof, "invalid entry size"); 14386 return (-1); 14387 } 14388 14389 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14390 14391 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14392 14393 /* 14394 * Take a pass through the probes to check for errors. 14395 */ 14396 for (j = 0; j < nprobes; j++) { 14397 probe = (dof_probe_t *)(uintptr_t)(daddr + 14398 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14399 14400 if (probe->dofpr_func >= str_sec->dofs_size) { 14401 dtrace_dof_error(dof, "invalid function name"); 14402 return (-1); 14403 } 14404 14405 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14406 dtrace_dof_error(dof, "function name too long"); 14407 return (-1); 14408 } 14409 14410 if (probe->dofpr_name >= str_sec->dofs_size || 14411 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14412 dtrace_dof_error(dof, "invalid probe name"); 14413 return (-1); 14414 } 14415 14416 /* 14417 * The offset count must not wrap the index, and the offsets 14418 * must also not overflow the section's data. 14419 */ 14420 if (probe->dofpr_offidx + probe->dofpr_noffs < 14421 probe->dofpr_offidx || 14422 (probe->dofpr_offidx + probe->dofpr_noffs) * 14423 off_sec->dofs_entsize > off_sec->dofs_size) { 14424 dtrace_dof_error(dof, "invalid probe offset"); 14425 return (-1); 14426 } 14427 14428 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14429 /* 14430 * If there's no is-enabled offset section, make sure 14431 * there aren't any is-enabled offsets. Otherwise 14432 * perform the same checks as for probe offsets 14433 * (immediately above). 
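 * As with the probe offsets, the "x + y < x" comparison below is
 * the usual unsigned wraparound check. Sketched in isolation, with
 * illustrative names only:
 *
 *	if (idx + n < idx)
 *		error;			(the unsigned sum wrapped)
 *
 * and the second clause then bounds the scaled range against the
 * size of the section itself.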
14434 */ 14435 if (enoff_sec == NULL) { 14436 if (probe->dofpr_enoffidx != 0 || 14437 probe->dofpr_nenoffs != 0) { 14438 dtrace_dof_error(dof, "is-enabled " 14439 "offsets with null section"); 14440 return (-1); 14441 } 14442 } else if (probe->dofpr_enoffidx + 14443 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14444 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14445 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14446 dtrace_dof_error(dof, "invalid is-enabled " 14447 "offset"); 14448 return (-1); 14449 } 14450 14451 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14452 dtrace_dof_error(dof, "zero probe and " 14453 "is-enabled offsets"); 14454 return (-1); 14455 } 14456 } else if (probe->dofpr_noffs == 0) { 14457 dtrace_dof_error(dof, "zero probe offsets"); 14458 return (-1); 14459 } 14460 14461 if (probe->dofpr_argidx + probe->dofpr_xargc < 14462 probe->dofpr_argidx || 14463 (probe->dofpr_argidx + probe->dofpr_xargc) * 14464 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14465 dtrace_dof_error(dof, "invalid args"); 14466 return (-1); 14467 } 14468 14469 typeidx = probe->dofpr_nargv; 14470 typestr = strtab + probe->dofpr_nargv; 14471 for (k = 0; k < probe->dofpr_nargc; k++) { 14472 if (typeidx >= str_sec->dofs_size) { 14473 dtrace_dof_error(dof, "bad " 14474 "native argument type"); 14475 return (-1); 14476 } 14477 14478 typesz = strlen(typestr) + 1; 14479 if (typesz > DTRACE_ARGTYPELEN) { 14480 dtrace_dof_error(dof, "native " 14481 "argument type too long"); 14482 return (-1); 14483 } 14484 typeidx += typesz; 14485 typestr += typesz; 14486 } 14487 14488 typeidx = probe->dofpr_xargv; 14489 typestr = strtab + probe->dofpr_xargv; 14490 for (k = 0; k < probe->dofpr_xargc; k++) { 14491 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14492 dtrace_dof_error(dof, "bad " 14493 "native argument index"); 14494 return (-1); 14495 } 14496 14497 if (typeidx >= str_sec->dofs_size) { 14498 dtrace_dof_error(dof, "bad " 14499 "translated argument type"); 14500 return (-1); 14501 } 14502 14503 typesz = strlen(typestr) + 1; 14504 if (typesz > DTRACE_ARGTYPELEN) { 14505 dtrace_dof_error(dof, "translated argument " 14506 "type too long"); 14507 return (-1); 14508 } 14509 14510 typeidx += typesz; 14511 typestr += typesz; 14512 } 14513 } 14514 14515 return (0); 14516 } 14517 14518 static int 14519 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14520 { 14521 dtrace_helpers_t *help; 14522 dtrace_vstate_t *vstate; 14523 dtrace_enabling_t *enab = NULL; 14524 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14525 uintptr_t daddr = (uintptr_t)dof; 14526 14527 ASSERT(MUTEX_HELD(&dtrace_lock)); 14528 14529 if ((help = curproc->p_dtrace_helpers) == NULL) 14530 help = dtrace_helpers_create(curproc); 14531 14532 vstate = &help->dthps_vstate; 14533 14534 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14535 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14536 dtrace_dof_destroy(dof); 14537 return (rv); 14538 } 14539 14540 /* 14541 * Look for helper providers and validate their descriptions. 
*/ 14543 if (dhp != NULL) { 14544 for (i = 0; i < dof->dofh_secnum; i++) { 14545 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14546 dof->dofh_secoff + i * dof->dofh_secsize); 14547 14548 if (sec->dofs_type != DOF_SECT_PROVIDER) 14549 continue; 14550 14551 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14552 dtrace_enabling_destroy(enab); 14553 dtrace_dof_destroy(dof); 14554 return (-1); 14555 } 14556 14557 nprovs++; 14558 } 14559 } 14560 14561 /* 14562 * Now we need to walk through the ECB descriptions in the enabling. 14563 */ 14564 for (i = 0; i < enab->dten_ndesc; i++) { 14565 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14566 dtrace_probedesc_t *desc = &ep->dted_probe; 14567 14568 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14569 continue; 14570 14571 if (strcmp(desc->dtpd_mod, "helper") != 0) 14572 continue; 14573 14574 if (strcmp(desc->dtpd_func, "ustack") != 0) 14575 continue; 14576 14577 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14578 ep)) != 0) { 14579 /* 14580 * Adding this helper action failed -- we are now going 14581 * to rip out the entire generation and return failure. 14582 */ 14583 (void) dtrace_helper_destroygen(help->dthps_generation); 14584 dtrace_enabling_destroy(enab); 14585 dtrace_dof_destroy(dof); 14586 return (-1); 14587 } 14588 14589 nhelpers++; 14590 } 14591 14592 if (nhelpers < enab->dten_ndesc) 14593 dtrace_dof_error(dof, "unmatched helpers"); 14594 14595 gen = help->dthps_generation++; 14596 dtrace_enabling_destroy(enab); 14597 14598 if (dhp != NULL && nprovs > 0) { 14599 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14600 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14601 mutex_exit(&dtrace_lock); 14602 dtrace_helper_provider_register(curproc, help, dhp); 14603 mutex_enter(&dtrace_lock); 14604 14605 destroy = 0; 14606 } 14607 } 14608 14609 if (destroy) 14610 dtrace_dof_destroy(dof); 14611 14612 return (gen); 14613 } 14614 14615 static dtrace_helpers_t * 14616 dtrace_helpers_create(proc_t *p) 14617 { 14618 dtrace_helpers_t *help; 14619 14620 ASSERT(MUTEX_HELD(&dtrace_lock)); 14621 ASSERT(p->p_dtrace_helpers == NULL); 14622 14623 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14624 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14625 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14626 14627 p->p_dtrace_helpers = help; 14628 dtrace_helpers++; 14629 14630 return (help); 14631 } 14632 14633 static void 14634 dtrace_helpers_destroy(void) 14635 { 14636 dtrace_helpers_t *help; 14637 dtrace_vstate_t *vstate; 14638 proc_t *p = curproc; 14639 int i; 14640 14641 mutex_enter(&dtrace_lock); 14642 14643 ASSERT(p->p_dtrace_helpers != NULL); 14644 ASSERT(dtrace_helpers > 0); 14645 14646 help = p->p_dtrace_helpers; 14647 vstate = &help->dthps_vstate; 14648 14649 /* 14650 * We're now going to lose the help from this process. 14651 */ 14652 p->p_dtrace_helpers = NULL; 14653 dtrace_sync(); 14654 14655 /* 14656 * Destroy the helper actions. 14657 */ 14658 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14659 dtrace_helper_action_t *h, *next; 14660 14661 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14662 next = h->dtha_next; 14663 dtrace_helper_action_destroy(h, vstate); 14664 h = next; 14665 } 14666 } 14667 14668 mutex_exit(&dtrace_lock); 14669 14670 /* 14671 * Destroy the helper providers.
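 * (Helper providers are reference-counted: dtrace_helpers_duplicate()
 * lets a fork()ed child share them by bumping dthp_ref, so
 * dtrace_helper_provider_destroy() frees the underlying DOF only when
 * the last reference is dropped.)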
14672 */ 14673 if (help->dthps_maxprovs > 0) { 14674 mutex_enter(&dtrace_meta_lock); 14675 if (dtrace_meta_pid != NULL) { 14676 ASSERT(dtrace_deferred_pid == NULL); 14677 14678 for (i = 0; i < help->dthps_nprovs; i++) { 14679 dtrace_helper_provider_remove( 14680 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14681 } 14682 } else { 14683 mutex_enter(&dtrace_lock); 14684 ASSERT(help->dthps_deferred == 0 || 14685 help->dthps_next != NULL || 14686 help->dthps_prev != NULL || 14687 help == dtrace_deferred_pid); 14688 14689 /* 14690 * Remove the helper from the deferred list. 14691 */ 14692 if (help->dthps_next != NULL) 14693 help->dthps_next->dthps_prev = help->dthps_prev; 14694 if (help->dthps_prev != NULL) 14695 help->dthps_prev->dthps_next = help->dthps_next; 14696 if (dtrace_deferred_pid == help) { 14697 dtrace_deferred_pid = help->dthps_next; 14698 ASSERT(help->dthps_prev == NULL); 14699 } 14700 14701 mutex_exit(&dtrace_lock); 14702 } 14703 14704 mutex_exit(&dtrace_meta_lock); 14705 14706 for (i = 0; i < help->dthps_nprovs; i++) { 14707 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14708 } 14709 14710 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14711 sizeof (dtrace_helper_provider_t *)); 14712 } 14713 14714 mutex_enter(&dtrace_lock); 14715 14716 dtrace_vstate_fini(&help->dthps_vstate); 14717 kmem_free(help->dthps_actions, 14718 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14719 kmem_free(help, sizeof (dtrace_helpers_t)); 14720 14721 --dtrace_helpers; 14722 mutex_exit(&dtrace_lock); 14723 } 14724 14725 static void 14726 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14727 { 14728 dtrace_helpers_t *help, *newhelp; 14729 dtrace_helper_action_t *helper, *new, *last; 14730 dtrace_difo_t *dp; 14731 dtrace_vstate_t *vstate; 14732 int i, j, sz, hasprovs = 0; 14733 14734 mutex_enter(&dtrace_lock); 14735 ASSERT(from->p_dtrace_helpers != NULL); 14736 ASSERT(dtrace_helpers > 0); 14737 14738 help = from->p_dtrace_helpers; 14739 newhelp = dtrace_helpers_create(to); 14740 ASSERT(to->p_dtrace_helpers != NULL); 14741 14742 newhelp->dthps_generation = help->dthps_generation; 14743 vstate = &newhelp->dthps_vstate; 14744 14745 /* 14746 * Duplicate the helper actions. 14747 */ 14748 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14749 if ((helper = help->dthps_actions[i]) == NULL) 14750 continue; 14751 14752 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14753 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14754 KM_SLEEP); 14755 new->dtha_generation = helper->dtha_generation; 14756 14757 if ((dp = helper->dtha_predicate) != NULL) { 14758 dp = dtrace_difo_duplicate(dp, vstate); 14759 new->dtha_predicate = dp; 14760 } 14761 14762 new->dtha_nactions = helper->dtha_nactions; 14763 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14764 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14765 14766 for (j = 0; j < new->dtha_nactions; j++) { 14767 dtrace_difo_t *dp = helper->dtha_actions[j]; 14768 14769 ASSERT(dp != NULL); 14770 dp = dtrace_difo_duplicate(dp, vstate); 14771 new->dtha_actions[j] = dp; 14772 } 14773 14774 if (last != NULL) { 14775 last->dtha_next = new; 14776 } else { 14777 newhelp->dthps_actions[i] = new; 14778 } 14779 14780 last = new; 14781 } 14782 } 14783 14784 /* 14785 * Duplicate the helper providers and register them with the 14786 * DTrace framework. 
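 * (This function runs at fork() time, via the dtrace_helpers_fork
 * hook installed in dtrace_attach(); the actions are deep-copied,
 * while the providers below are shared with the parent.)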
*/ 14788 if (help->dthps_nprovs > 0) { 14789 newhelp->dthps_nprovs = help->dthps_nprovs; 14790 newhelp->dthps_maxprovs = help->dthps_nprovs; 14791 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14792 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14793 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14794 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14795 newhelp->dthps_provs[i]->dthp_ref++; 14796 } 14797 14798 hasprovs = 1; 14799 } 14800 14801 mutex_exit(&dtrace_lock); 14802 14803 if (hasprovs) 14804 dtrace_helper_provider_register(to, newhelp, NULL); 14805 } 14806 #endif 14807 14808 #if defined(sun) 14809 /* 14810 * DTrace Hook Functions 14811 */ 14812 static void 14813 dtrace_module_loaded(modctl_t *ctl) 14814 { 14815 dtrace_provider_t *prv; 14816 14817 mutex_enter(&dtrace_provider_lock); 14818 mutex_enter(&mod_lock); 14819 14820 ASSERT(ctl->mod_busy); 14821 14822 /* 14823 * We're going to call each provider's per-module provide operation 14824 * specifying only this module. 14825 */ 14826 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14827 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14828 14829 mutex_exit(&mod_lock); 14830 mutex_exit(&dtrace_provider_lock); 14831 14832 /* 14833 * If we have any retained enablings, we need to match against them. 14834 * Enabling probes requires that cpu_lock be held, and we cannot hold 14835 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14836 * module. (In particular, this happens when loading scheduling 14837 * classes.) So if we have any retained enablings, we need to dispatch 14838 * our task queue to do the match for us. 14839 */ 14840 mutex_enter(&dtrace_lock); 14841 14842 if (dtrace_retained == NULL) { 14843 mutex_exit(&dtrace_lock); 14844 return; 14845 } 14846 14847 (void) taskq_dispatch(dtrace_taskq, 14848 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 14849 14850 mutex_exit(&dtrace_lock); 14851 14852 /* 14853 * And now, for a little heuristic sleaze: in general, we want to 14854 * match modules as soon as they load. However, we cannot guarantee 14855 * this, because it would lead us to the lock ordering violation 14856 * outlined above. The common case, of course, is that cpu_lock is 14857 * _not_ held -- so we delay here for a clock tick, hoping that that's 14858 * long enough for the task queue to do its work. If it's not, it's 14859 * not a serious problem -- it just means that the module that we 14860 * just loaded may not be immediately instrumentable. 14861 */ 14862 delay(1); 14863 } 14864 14865 static void 14866 dtrace_module_unloaded(modctl_t *ctl) 14867 { 14868 dtrace_probe_t template, *probe, *first, *next; 14869 dtrace_provider_t *prov; 14870 14871 template.dtpr_mod = ctl->mod_modname; 14872 14873 mutex_enter(&dtrace_provider_lock); 14874 mutex_enter(&mod_lock); 14875 mutex_enter(&dtrace_lock); 14876 14877 if (dtrace_bymod == NULL) { 14878 /* 14879 * The DTrace module is loaded (obviously) but not attached; 14880 * we don't have any work to do.
14881 */ 14882 mutex_exit(&dtrace_provider_lock); 14883 mutex_exit(&mod_lock); 14884 mutex_exit(&dtrace_lock); 14885 return; 14886 } 14887 14888 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 14889 probe != NULL; probe = probe->dtpr_nextmod) { 14890 if (probe->dtpr_ecb != NULL) { 14891 mutex_exit(&dtrace_provider_lock); 14892 mutex_exit(&mod_lock); 14893 mutex_exit(&dtrace_lock); 14894 14895 /* 14896 * This shouldn't _actually_ be possible -- we're 14897 * unloading a module that has an enabled probe in it. 14898 * (It's normally up to the provider to make sure that 14899 * this can't happen.) However, because dtps_enable() 14900 * doesn't have a failure mode, there can be an 14901 * enable/unload race. Upshot: we don't want to 14902 * assert, but we're not going to disable the 14903 * probe, either. 14904 */ 14905 if (dtrace_err_verbose) { 14906 cmn_err(CE_WARN, "unloaded module '%s' had " 14907 "enabled probes", ctl->mod_modname); 14908 } 14909 14910 return; 14911 } 14912 } 14913 14914 probe = first; 14915 14916 for (first = NULL; probe != NULL; probe = next) { 14917 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14918 14919 dtrace_probes[probe->dtpr_id - 1] = NULL; 14920 14921 next = probe->dtpr_nextmod; 14922 dtrace_hash_remove(dtrace_bymod, probe); 14923 dtrace_hash_remove(dtrace_byfunc, probe); 14924 dtrace_hash_remove(dtrace_byname, probe); 14925 14926 if (first == NULL) { 14927 first = probe; 14928 probe->dtpr_nextmod = NULL; 14929 } else { 14930 probe->dtpr_nextmod = first; 14931 first = probe; 14932 } 14933 } 14934 14935 /* 14936 * We've removed all of the module's probes from the hash chains and 14937 * from the probe array. Now issue a dtrace_sync() to be sure that 14938 * everyone has cleared out from any probe array processing. 14939 */ 14940 dtrace_sync(); 14941 14942 for (probe = first; probe != NULL; probe = first) { 14943 first = probe->dtpr_nextmod; 14944 prov = probe->dtpr_provider; 14945 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14946 probe->dtpr_arg); 14947 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14948 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14949 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14950 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14951 kmem_free(probe, sizeof (dtrace_probe_t)); 14952 } 14953 14954 mutex_exit(&dtrace_lock); 14955 mutex_exit(&mod_lock); 14956 mutex_exit(&dtrace_provider_lock); 14957 } 14958 14959 static void 14960 dtrace_suspend(void) 14961 { 14962 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14963 } 14964 14965 static void 14966 dtrace_resume(void) 14967 { 14968 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14969 } 14970 #endif 14971 14972 static int 14973 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14974 { 14975 ASSERT(MUTEX_HELD(&cpu_lock)); 14976 mutex_enter(&dtrace_lock); 14977 14978 switch (what) { 14979 case CPU_CONFIG: { 14980 dtrace_state_t *state; 14981 dtrace_optval_t *opt, rs, c; 14982 14983 /* 14984 * For now, we only allocate a new buffer for anonymous state. 
14985 */ 14986 if ((state = dtrace_anon.dta_state) == NULL) 14987 break; 14988 14989 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14990 break; 14991 14992 opt = state->dts_options; 14993 c = opt[DTRACEOPT_CPU]; 14994 14995 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14996 break; 14997 14998 /* 14999 * Regardless of what the actual policy is, we're going to 15000 * temporarily set our resize policy to be manual. We're 15001 * also going to temporarily set our CPU option to denote 15002 * the newly configured CPU. 15003 */ 15004 rs = opt[DTRACEOPT_BUFRESIZE]; 15005 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15006 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15007 15008 (void) dtrace_state_buffers(state); 15009 15010 opt[DTRACEOPT_BUFRESIZE] = rs; 15011 opt[DTRACEOPT_CPU] = c; 15012 15013 break; 15014 } 15015 15016 case CPU_UNCONFIG: 15017 /* 15018 * We don't free the buffer in the CPU_UNCONFIG case. (The 15019 * buffer will be freed when the consumer exits.) 15020 */ 15021 break; 15022 15023 default: 15024 break; 15025 } 15026 15027 mutex_exit(&dtrace_lock); 15028 return (0); 15029 } 15030 15031 #if defined(sun) 15032 static void 15033 dtrace_cpu_setup_initial(processorid_t cpu) 15034 { 15035 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15036 } 15037 #endif 15038 15039 static void 15040 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15041 { 15042 if (dtrace_toxranges >= dtrace_toxranges_max) { 15043 int osize, nsize; 15044 dtrace_toxrange_t *range; 15045 15046 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15047 15048 if (osize == 0) { 15049 ASSERT(dtrace_toxrange == NULL); 15050 ASSERT(dtrace_toxranges_max == 0); 15051 dtrace_toxranges_max = 1; 15052 } else { 15053 dtrace_toxranges_max <<= 1; 15054 } 15055 15056 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15057 range = kmem_zalloc(nsize, KM_SLEEP); 15058 15059 if (dtrace_toxrange != NULL) { 15060 ASSERT(osize != 0); 15061 bcopy(dtrace_toxrange, range, osize); 15062 kmem_free(dtrace_toxrange, osize); 15063 } 15064 15065 dtrace_toxrange = range; 15066 } 15067 15068 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15069 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15070 15071 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15072 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15073 dtrace_toxranges++; 15074 } 15075 15076 /* 15077 * DTrace Driver Cookbook Functions 15078 */ 15079 #if defined(sun) 15080 /*ARGSUSED*/ 15081 static int 15082 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15083 { 15084 dtrace_provider_id_t id; 15085 dtrace_state_t *state = NULL; 15086 dtrace_enabling_t *enab; 15087 15088 mutex_enter(&cpu_lock); 15089 mutex_enter(&dtrace_provider_lock); 15090 mutex_enter(&dtrace_lock); 15091 15092 if (ddi_soft_state_init(&dtrace_softstate, 15093 sizeof (dtrace_state_t), 0) != 0) { 15094 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15095 mutex_exit(&cpu_lock); 15096 mutex_exit(&dtrace_provider_lock); 15097 mutex_exit(&dtrace_lock); 15098 return (DDI_FAILURE); 15099 } 15100 15101 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15102 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15103 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15104 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15105 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15106 ddi_remove_minor_node(devi, NULL); 15107 ddi_soft_state_fini(&dtrace_softstate); 15108 mutex_exit(&cpu_lock); 15109 
mutex_exit(&dtrace_provider_lock); 15110 mutex_exit(&dtrace_lock); 15111 return (DDI_FAILURE); 15112 } 15113 15114 ddi_report_dev(devi); 15115 dtrace_devi = devi; 15116 15117 dtrace_modload = dtrace_module_loaded; 15118 dtrace_modunload = dtrace_module_unloaded; 15119 dtrace_cpu_init = dtrace_cpu_setup_initial; 15120 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15121 dtrace_helpers_fork = dtrace_helpers_duplicate; 15122 dtrace_cpustart_init = dtrace_suspend; 15123 dtrace_cpustart_fini = dtrace_resume; 15124 dtrace_debugger_init = dtrace_suspend; 15125 dtrace_debugger_fini = dtrace_resume; 15126 15127 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15128 15129 ASSERT(MUTEX_HELD(&cpu_lock)); 15130 15131 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15132 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15133 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15134 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15135 VM_SLEEP | VMC_IDENTIFIER); 15136 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15137 1, INT_MAX, 0); 15138 15139 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15140 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15141 NULL, NULL, NULL, NULL, NULL, 0); 15142 15143 ASSERT(MUTEX_HELD(&cpu_lock)); 15144 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15145 offsetof(dtrace_probe_t, dtpr_nextmod), 15146 offsetof(dtrace_probe_t, dtpr_prevmod)); 15147 15148 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15149 offsetof(dtrace_probe_t, dtpr_nextfunc), 15150 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15151 15152 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15153 offsetof(dtrace_probe_t, dtpr_nextname), 15154 offsetof(dtrace_probe_t, dtpr_prevname)); 15155 15156 if (dtrace_retain_max < 1) { 15157 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15158 "setting to 1", dtrace_retain_max); 15159 dtrace_retain_max = 1; 15160 } 15161 15162 /* 15163 * Now discover our toxic ranges. 15164 */ 15165 dtrace_toxic_ranges(dtrace_toxrange_add); 15166 15167 /* 15168 * Before we register ourselves as a provider to our own framework, 15169 * we would like to assert that dtrace_provider is NULL -- but that's 15170 * not true if we were loaded as a dependency of a DTrace provider. 15171 * Once we've registered, we can assert that dtrace_provider is our 15172 * pseudo provider. 15173 */ 15174 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15175 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15176 15177 ASSERT(dtrace_provider != NULL); 15178 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15179 15180 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15181 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15182 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15183 dtrace_provider, NULL, NULL, "END", 0, NULL); 15184 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15185 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15186 15187 dtrace_anon_property(); 15188 mutex_exit(&cpu_lock); 15189 15190 /* 15191 * If DTrace helper tracing is enabled, we need to allocate the 15192 * trace buffer and initialize the values. 
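 * (When tracing is enabled, dtrace_helper_trace() records each helper
 * invocation -- predicate, actions, and any fault -- into the buffer
 * allocated here; dtrace_helptrace_bufsize governs its size.)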
*/ 15194 if (dtrace_helptrace_enabled) { 15195 ASSERT(dtrace_helptrace_buffer == NULL); 15196 dtrace_helptrace_buffer = 15197 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15198 dtrace_helptrace_next = 0; 15199 } 15200 15201 /* 15202 * If there are already providers, we must ask them to provide their 15203 * probes, and then match any anonymous enabling against them. Note 15204 * that there should be no other retained enablings at this time: 15205 * the only retained enabling at this time should be the anonymous 15206 * enabling. 15207 */ 15208 if (dtrace_anon.dta_enabling != NULL) { 15209 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15210 15211 dtrace_enabling_provide(NULL); 15212 state = dtrace_anon.dta_state; 15213 15214 /* 15215 * We couldn't hold cpu_lock across the above call to 15216 * dtrace_enabling_provide(), but we must hold it to actually 15217 * enable the probes. We have to drop all of our locks, pick 15218 * up cpu_lock, and regain our locks before matching the 15219 * retained anonymous enabling. 15220 */ 15221 mutex_exit(&dtrace_lock); 15222 mutex_exit(&dtrace_provider_lock); 15223 15224 mutex_enter(&cpu_lock); 15225 mutex_enter(&dtrace_provider_lock); 15226 mutex_enter(&dtrace_lock); 15227 15228 if ((enab = dtrace_anon.dta_enabling) != NULL) 15229 (void) dtrace_enabling_match(enab, NULL); 15230 15231 mutex_exit(&cpu_lock); 15232 } 15233 15234 mutex_exit(&dtrace_lock); 15235 mutex_exit(&dtrace_provider_lock); 15236 15237 if (state != NULL) { 15238 /* 15239 * If we created any anonymous state, set it going now. 15240 */ 15241 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15242 } 15243 15244 return (DDI_SUCCESS); 15245 } 15246 #endif 15247 15248 #if !defined(sun) 15249 #if __FreeBSD_version >= 800039 15250 static void 15251 dtrace_dtr(void *data __unused) 15252 { 15253 } 15254 #endif 15255 #endif 15256 15257 /*ARGSUSED*/ 15258 static int 15259 #if defined(sun) 15260 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15261 #else 15262 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15263 #endif 15264 { 15265 dtrace_state_t *state; 15266 uint32_t priv; 15267 uid_t uid; 15268 zoneid_t zoneid; 15269 15270 #if defined(sun) 15271 if (getminor(*devp) == DTRACEMNRN_HELPER) 15272 return (0); 15273 15274 /* 15275 * If this wasn't an open with the "helper" minor, then it must be 15276 * the "dtrace" minor. 15277 */ 15278 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15279 #else 15280 cred_t *cred_p = NULL; 15281 15282 #if __FreeBSD_version < 800039 15283 /* 15284 * The first minor device is the one that is cloned, so there is 15285 * nothing more to do here. 15286 */ 15287 if (dev2unit(dev) == 0) 15288 return (0); 15289 15290 /* 15291 * Devices are cloned, so if the DTrace state has already 15292 * been allocated, that means this device belongs to a 15293 * different client. Each client should open '/dev/dtrace' 15294 * to get a cloned device. 15295 */ 15296 if (dev->si_drv1 != NULL) 15297 return (EBUSY); 15298 #endif 15299 15300 cred_p = dev->si_cred; 15301 #endif 15302 15303 /* 15304 * If no DTRACE_PRIV_* bits are set in the credential, then the 15305 * caller lacks sufficient permission to do anything with DTrace. 15306 */ 15307 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15308 if (priv == DTRACE_PRIV_NONE) { 15309 #if !defined(sun) 15310 #if __FreeBSD_version < 800039 15311 /* Destroy the cloned device.
*/ 15312 destroy_dev(dev); 15313 #endif 15314 #endif 15315 15316 return (EACCES); 15317 } 15318 15319 /* 15320 * Ask all providers to provide all their probes. 15321 */ 15322 mutex_enter(&dtrace_provider_lock); 15323 dtrace_probe_provide(NULL, NULL); 15324 mutex_exit(&dtrace_provider_lock); 15325 15326 mutex_enter(&cpu_lock); 15327 mutex_enter(&dtrace_lock); 15328 dtrace_opens++; 15329 dtrace_membar_producer(); 15330 15331 #if defined(sun) 15332 /* 15333 * If the kernel debugger is active (that is, if the kernel debugger 15334 * modified text in some way), we won't allow the open. 15335 */ 15336 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15337 dtrace_opens--; 15338 mutex_exit(&cpu_lock); 15339 mutex_exit(&dtrace_lock); 15340 return (EBUSY); 15341 } 15342 15343 state = dtrace_state_create(devp, cred_p); 15344 #else 15345 state = dtrace_state_create(dev); 15346 #if __FreeBSD_version < 800039 15347 dev->si_drv1 = state; 15348 #else 15349 devfs_set_cdevpriv(state, dtrace_dtr); 15350 #endif 15351 #endif 15352 15353 mutex_exit(&cpu_lock); 15354 15355 if (state == NULL) { 15356 #if defined(sun) 15357 if (--dtrace_opens == 0) 15358 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15359 #else 15360 --dtrace_opens; 15361 #endif 15362 mutex_exit(&dtrace_lock); 15363 #if !defined(sun) 15364 #if __FreeBSD_version < 800039 15365 /* Destroy the cloned device. */ 15366 destroy_dev(dev); 15367 #endif 15368 #endif 15369 return (EAGAIN); 15370 } 15371 15372 mutex_exit(&dtrace_lock); 15373 15374 return (0); 15375 } 15376 15377 /*ARGSUSED*/ 15378 static int 15379 #if defined(sun) 15380 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15381 #else 15382 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15383 #endif 15384 { 15385 #if defined(sun) 15386 minor_t minor = getminor(dev); 15387 dtrace_state_t *state; 15388 15389 if (minor == DTRACEMNRN_HELPER) 15390 return (0); 15391 15392 state = ddi_get_soft_state(dtrace_softstate, minor); 15393 #else 15394 #if __FreeBSD_version < 800039 15395 dtrace_state_t *state = dev->si_drv1; 15396 15397 /* Check if this is not a cloned device. */ 15398 if (dev2unit(dev) == 0) 15399 return (0); 15400 #else 15401 dtrace_state_t *state; 15402 devfs_get_cdevpriv((void **) &state); 15403 #endif 15404 15405 #endif 15406 15407 mutex_enter(&cpu_lock); 15408 mutex_enter(&dtrace_lock); 15409 15410 if (state != NULL) { 15411 if (state->dts_anon) { 15412 /* 15413 * There is anonymous state. Destroy that first. 15414 */ 15415 ASSERT(dtrace_anon.dta_state == NULL); 15416 dtrace_state_destroy(state->dts_anon); 15417 } 15418 15419 dtrace_state_destroy(state); 15420 15421 #if !defined(sun) 15422 kmem_free(state, 0); 15423 #if __FreeBSD_version < 800039 15424 dev->si_drv1 = NULL; 15425 #else 15426 devfs_clear_cdevpriv(); 15427 #endif 15428 #endif 15429 } 15430 15431 ASSERT(dtrace_opens > 0); 15432 #if defined(sun) 15433 if (--dtrace_opens == 0) 15434 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15435 #else 15436 --dtrace_opens; 15437 #endif 15438 15439 mutex_exit(&dtrace_lock); 15440 mutex_exit(&cpu_lock); 15441 15442 #if __FreeBSD_version < 800039 15443 /* Schedule this cloned device to be destroyed. 
*/ 15444 destroy_dev_sched(dev); 15445 #endif 15446 15447 return (0); 15448 } 15449 15450 #if defined(sun) 15451 /*ARGSUSED*/ 15452 static int 15453 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15454 { 15455 int rval; 15456 dof_helper_t help, *dhp = NULL; 15457 15458 switch (cmd) { 15459 case DTRACEHIOC_ADDDOF: 15460 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15461 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15462 return (EFAULT); 15463 } 15464 15465 dhp = &help; 15466 arg = (intptr_t)help.dofhp_dof; 15467 /*FALLTHROUGH*/ 15468 15469 case DTRACEHIOC_ADD: { 15470 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15471 15472 if (dof == NULL) 15473 return (rval); 15474 15475 mutex_enter(&dtrace_lock); 15476 15477 /* 15478 * dtrace_helper_slurp() takes responsibility for the dof -- 15479 * it may free it now or it may save it and free it later. 15480 */ 15481 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15482 *rv = rval; 15483 rval = 0; 15484 } else { 15485 rval = EINVAL; 15486 } 15487 15488 mutex_exit(&dtrace_lock); 15489 return (rval); 15490 } 15491 15492 case DTRACEHIOC_REMOVE: { 15493 mutex_enter(&dtrace_lock); 15494 rval = dtrace_helper_destroygen(arg); 15495 mutex_exit(&dtrace_lock); 15496 15497 return (rval); 15498 } 15499 15500 default: 15501 break; 15502 } 15503 15504 return (ENOTTY); 15505 } 15506 15507 /*ARGSUSED*/ 15508 static int 15509 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15510 { 15511 minor_t minor = getminor(dev); 15512 dtrace_state_t *state; 15513 int rval; 15514 15515 if (minor == DTRACEMNRN_HELPER) 15516 return (dtrace_ioctl_helper(cmd, arg, rv)); 15517 15518 state = ddi_get_soft_state(dtrace_softstate, minor); 15519 15520 if (state->dts_anon) { 15521 ASSERT(dtrace_anon.dta_state == NULL); 15522 state = state->dts_anon; 15523 } 15524 15525 switch (cmd) { 15526 case DTRACEIOC_PROVIDER: { 15527 dtrace_providerdesc_t pvd; 15528 dtrace_provider_t *pvp; 15529 15530 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15531 return (EFAULT); 15532 15533 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15534 mutex_enter(&dtrace_provider_lock); 15535 15536 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15537 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15538 break; 15539 } 15540 15541 mutex_exit(&dtrace_provider_lock); 15542 15543 if (pvp == NULL) 15544 return (ESRCH); 15545 15546 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15547 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15548 15549 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15550 return (EFAULT); 15551 15552 return (0); 15553 } 15554 15555 case DTRACEIOC_EPROBE: { 15556 dtrace_eprobedesc_t epdesc; 15557 dtrace_ecb_t *ecb; 15558 dtrace_action_t *act; 15559 void *buf; 15560 size_t size; 15561 uintptr_t dest; 15562 int nrecs; 15563 15564 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15565 return (EFAULT); 15566 15567 mutex_enter(&dtrace_lock); 15568 15569 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15570 mutex_exit(&dtrace_lock); 15571 return (EINVAL); 15572 } 15573 15574 if (ecb->dte_probe == NULL) { 15575 mutex_exit(&dtrace_lock); 15576 return (EINVAL); 15577 } 15578 15579 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15580 epdesc.dtepd_uarg = ecb->dte_uarg; 15581 epdesc.dtepd_size = ecb->dte_size; 15582 15583 nrecs = epdesc.dtepd_nrecs; 15584 epdesc.dtepd_nrecs = 0; 15585 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15586 if 
(DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15587 continue; 15588 15589 epdesc.dtepd_nrecs++; 15590 } 15591 15592 /* 15593 * Now that we have the size, we need to allocate a temporary 15594 * buffer in which to store the complete description. We need 15595 * the temporary buffer to be able to drop dtrace_lock() 15596 * across the copyout(), below. 15597 */ 15598 size = sizeof (dtrace_eprobedesc_t) + 15599 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15600 15601 buf = kmem_alloc(size, KM_SLEEP); 15602 dest = (uintptr_t)buf; 15603 15604 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15605 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15606 15607 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15608 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15609 continue; 15610 15611 if (nrecs-- == 0) 15612 break; 15613 15614 bcopy(&act->dta_rec, (void *)dest, 15615 sizeof (dtrace_recdesc_t)); 15616 dest += sizeof (dtrace_recdesc_t); 15617 } 15618 15619 mutex_exit(&dtrace_lock); 15620 15621 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15622 kmem_free(buf, size); 15623 return (EFAULT); 15624 } 15625 15626 kmem_free(buf, size); 15627 return (0); 15628 } 15629 15630 case DTRACEIOC_AGGDESC: { 15631 dtrace_aggdesc_t aggdesc; 15632 dtrace_action_t *act; 15633 dtrace_aggregation_t *agg; 15634 int nrecs; 15635 uint32_t offs; 15636 dtrace_recdesc_t *lrec; 15637 void *buf; 15638 size_t size; 15639 uintptr_t dest; 15640 15641 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15642 return (EFAULT); 15643 15644 mutex_enter(&dtrace_lock); 15645 15646 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15647 mutex_exit(&dtrace_lock); 15648 return (EINVAL); 15649 } 15650 15651 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15652 15653 nrecs = aggdesc.dtagd_nrecs; 15654 aggdesc.dtagd_nrecs = 0; 15655 15656 offs = agg->dtag_base; 15657 lrec = &agg->dtag_action.dta_rec; 15658 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15659 15660 for (act = agg->dtag_first; ; act = act->dta_next) { 15661 ASSERT(act->dta_intuple || 15662 DTRACEACT_ISAGG(act->dta_kind)); 15663 15664 /* 15665 * If this action has a record size of zero, it 15666 * denotes an argument to the aggregating action. 15667 * Because the presence of this record doesn't (or 15668 * shouldn't) affect the way the data is interpreted, 15669 * we don't copy it out to save user-level the 15670 * confusion of dealing with a zero-length record. 15671 */ 15672 if (act->dta_rec.dtrd_size == 0) { 15673 ASSERT(agg->dtag_hasarg); 15674 continue; 15675 } 15676 15677 aggdesc.dtagd_nrecs++; 15678 15679 if (act == &agg->dtag_action) 15680 break; 15681 } 15682 15683 /* 15684 * Now that we have the size, we need to allocate a temporary 15685 * buffer in which to store the complete description. We need 15686 * the temporary buffer to be able to drop dtrace_lock() 15687 * across the copyout(), below. 15688 */ 15689 size = sizeof (dtrace_aggdesc_t) + 15690 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15691 15692 buf = kmem_alloc(size, KM_SLEEP); 15693 dest = (uintptr_t)buf; 15694 15695 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15696 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15697 15698 for (act = agg->dtag_first; ; act = act->dta_next) { 15699 dtrace_recdesc_t rec = act->dta_rec; 15700 15701 /* 15702 * See the comment in the above loop for why we pass 15703 * over zero-length records. 
15704 */ 15705 if (rec.dtrd_size == 0) { 15706 ASSERT(agg->dtag_hasarg); 15707 continue; 15708 } 15709 15710 if (nrecs-- == 0) 15711 break; 15712 15713 rec.dtrd_offset -= offs; 15714 bcopy(&rec, (void *)dest, sizeof (rec)); 15715 dest += sizeof (dtrace_recdesc_t); 15716 15717 if (act == &agg->dtag_action) 15718 break; 15719 } 15720 15721 mutex_exit(&dtrace_lock); 15722 15723 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15724 kmem_free(buf, size); 15725 return (EFAULT); 15726 } 15727 15728 kmem_free(buf, size); 15729 return (0); 15730 } 15731 15732 case DTRACEIOC_ENABLE: { 15733 dof_hdr_t *dof; 15734 dtrace_enabling_t *enab = NULL; 15735 dtrace_vstate_t *vstate; 15736 int err = 0; 15737 15738 *rv = 0; 15739 15740 /* 15741 * If a NULL argument has been passed, we take this as our 15742 * cue to reevaluate our enablings. 15743 */ 15744 if (arg == NULL) { 15745 dtrace_enabling_matchall(); 15746 15747 return (0); 15748 } 15749 15750 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15751 return (rval); 15752 15753 mutex_enter(&cpu_lock); 15754 mutex_enter(&dtrace_lock); 15755 vstate = &state->dts_vstate; 15756 15757 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15758 mutex_exit(&dtrace_lock); 15759 mutex_exit(&cpu_lock); 15760 dtrace_dof_destroy(dof); 15761 return (EBUSY); 15762 } 15763 15764 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15765 mutex_exit(&dtrace_lock); 15766 mutex_exit(&cpu_lock); 15767 dtrace_dof_destroy(dof); 15768 return (EINVAL); 15769 } 15770 15771 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15772 dtrace_enabling_destroy(enab); 15773 mutex_exit(&dtrace_lock); 15774 mutex_exit(&cpu_lock); 15775 dtrace_dof_destroy(dof); 15776 return (rval); 15777 } 15778 15779 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15780 err = dtrace_enabling_retain(enab); 15781 } else { 15782 dtrace_enabling_destroy(enab); 15783 } 15784 15785 mutex_exit(&cpu_lock); 15786 mutex_exit(&dtrace_lock); 15787 dtrace_dof_destroy(dof); 15788 15789 return (err); 15790 } 15791 15792 case DTRACEIOC_REPLICATE: { 15793 dtrace_repldesc_t desc; 15794 dtrace_probedesc_t *match = &desc.dtrpd_match; 15795 dtrace_probedesc_t *create = &desc.dtrpd_create; 15796 int err; 15797 15798 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15799 return (EFAULT); 15800 15801 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15802 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15803 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15804 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15805 15806 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15807 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15808 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15809 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15810 15811 mutex_enter(&dtrace_lock); 15812 err = dtrace_enabling_replicate(state, match, create); 15813 mutex_exit(&dtrace_lock); 15814 15815 return (err); 15816 } 15817 15818 case DTRACEIOC_PROBEMATCH: 15819 case DTRACEIOC_PROBES: { 15820 dtrace_probe_t *probe = NULL; 15821 dtrace_probedesc_t desc; 15822 dtrace_probekey_t pkey; 15823 dtrace_id_t i; 15824 int m = 0; 15825 uint32_t priv; 15826 uid_t uid; 15827 zoneid_t zoneid; 15828 15829 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15830 return (EFAULT); 15831 15832 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15833 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15834 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15835 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15836 15837 /* 15838 * Before we attempt to 
match this probe, we want to give 15839 * all providers the opportunity to provide it. 15840 */ 15841 if (desc.dtpd_id == DTRACE_IDNONE) { 15842 mutex_enter(&dtrace_provider_lock); 15843 dtrace_probe_provide(&desc, NULL); 15844 mutex_exit(&dtrace_provider_lock); 15845 desc.dtpd_id++; 15846 } 15847 15848 if (cmd == DTRACEIOC_PROBEMATCH) { 15849 dtrace_probekey(&desc, &pkey); 15850 pkey.dtpk_id = DTRACE_IDNONE; 15851 } 15852 15853 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15854 15855 mutex_enter(&dtrace_lock); 15856 15857 if (cmd == DTRACEIOC_PROBEMATCH) { 15858 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15859 if ((probe = dtrace_probes[i - 1]) != NULL && 15860 (m = dtrace_match_probe(probe, &pkey, 15861 priv, uid, zoneid)) != 0) 15862 break; 15863 } 15864 15865 if (m < 0) { 15866 mutex_exit(&dtrace_lock); 15867 return (EINVAL); 15868 } 15869 15870 } else { 15871 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15872 if ((probe = dtrace_probes[i - 1]) != NULL && 15873 dtrace_match_priv(probe, priv, uid, zoneid)) 15874 break; 15875 } 15876 } 15877 15878 if (probe == NULL) { 15879 mutex_exit(&dtrace_lock); 15880 return (ESRCH); 15881 } 15882 15883 dtrace_probe_description(probe, &desc); 15884 mutex_exit(&dtrace_lock); 15885 15886 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15887 return (EFAULT); 15888 15889 return (0); 15890 } 15891 15892 case DTRACEIOC_PROBEARG: { 15893 dtrace_argdesc_t desc; 15894 dtrace_probe_t *probe; 15895 dtrace_provider_t *prov; 15896 15897 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15898 return (EFAULT); 15899 15900 if (desc.dtargd_id == DTRACE_IDNONE) 15901 return (EINVAL); 15902 15903 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15904 return (EINVAL); 15905 15906 mutex_enter(&dtrace_provider_lock); 15907 mutex_enter(&mod_lock); 15908 mutex_enter(&dtrace_lock); 15909 15910 if (desc.dtargd_id > dtrace_nprobes) { 15911 mutex_exit(&dtrace_lock); 15912 mutex_exit(&mod_lock); 15913 mutex_exit(&dtrace_provider_lock); 15914 return (EINVAL); 15915 } 15916 15917 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15918 mutex_exit(&dtrace_lock); 15919 mutex_exit(&mod_lock); 15920 mutex_exit(&dtrace_provider_lock); 15921 return (EINVAL); 15922 } 15923 15924 mutex_exit(&dtrace_lock); 15925 15926 prov = probe->dtpr_provider; 15927 15928 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15929 /* 15930 * There isn't any typed information for this probe. 15931 * Set the argument number to DTRACE_ARGNONE. 
15932 */ 15933 desc.dtargd_ndx = DTRACE_ARGNONE; 15934 } else { 15935 desc.dtargd_native[0] = '\0'; 15936 desc.dtargd_xlate[0] = '\0'; 15937 desc.dtargd_mapping = desc.dtargd_ndx; 15938 15939 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15940 probe->dtpr_id, probe->dtpr_arg, &desc); 15941 } 15942 15943 mutex_exit(&mod_lock); 15944 mutex_exit(&dtrace_provider_lock); 15945 15946 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15947 return (EFAULT); 15948 15949 return (0); 15950 } 15951 15952 case DTRACEIOC_GO: { 15953 processorid_t cpuid; 15954 rval = dtrace_state_go(state, &cpuid); 15955 15956 if (rval != 0) 15957 return (rval); 15958 15959 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15960 return (EFAULT); 15961 15962 return (0); 15963 } 15964 15965 case DTRACEIOC_STOP: { 15966 processorid_t cpuid; 15967 15968 mutex_enter(&dtrace_lock); 15969 rval = dtrace_state_stop(state, &cpuid); 15970 mutex_exit(&dtrace_lock); 15971 15972 if (rval != 0) 15973 return (rval); 15974 15975 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15976 return (EFAULT); 15977 15978 return (0); 15979 } 15980 15981 case DTRACEIOC_DOFGET: { 15982 dof_hdr_t hdr, *dof; 15983 uint64_t len; 15984 15985 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15986 return (EFAULT); 15987 15988 mutex_enter(&dtrace_lock); 15989 dof = dtrace_dof_create(state); 15990 mutex_exit(&dtrace_lock); 15991 15992 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15993 rval = copyout(dof, (void *)arg, len); 15994 dtrace_dof_destroy(dof); 15995 15996 return (rval == 0 ? 0 : EFAULT); 15997 } 15998 15999 case DTRACEIOC_AGGSNAP: 16000 case DTRACEIOC_BUFSNAP: { 16001 dtrace_bufdesc_t desc; 16002 caddr_t cached; 16003 dtrace_buffer_t *buf; 16004 16005 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16006 return (EFAULT); 16007 16008 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16009 return (EINVAL); 16010 16011 mutex_enter(&dtrace_lock); 16012 16013 if (cmd == DTRACEIOC_BUFSNAP) { 16014 buf = &state->dts_buffer[desc.dtbd_cpu]; 16015 } else { 16016 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16017 } 16018 16019 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16020 size_t sz = buf->dtb_offset; 16021 16022 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16023 mutex_exit(&dtrace_lock); 16024 return (EBUSY); 16025 } 16026 16027 /* 16028 * If this buffer has already been consumed, we're 16029 * going to indicate that there's nothing left here 16030 * to consume. 16031 */ 16032 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16033 mutex_exit(&dtrace_lock); 16034 16035 desc.dtbd_size = 0; 16036 desc.dtbd_drops = 0; 16037 desc.dtbd_errors = 0; 16038 desc.dtbd_oldest = 0; 16039 sz = sizeof (desc); 16040 16041 if (copyout(&desc, (void *)arg, sz) != 0) 16042 return (EFAULT); 16043 16044 return (0); 16045 } 16046 16047 /* 16048 * If this is a ring buffer that has wrapped, we want 16049 * to copy the whole thing out. 
16050 */ 16051 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16052 dtrace_buffer_polish(buf); 16053 sz = buf->dtb_size; 16054 } 16055 16056 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16057 mutex_exit(&dtrace_lock); 16058 return (EFAULT); 16059 } 16060 16061 desc.dtbd_size = sz; 16062 desc.dtbd_drops = buf->dtb_drops; 16063 desc.dtbd_errors = buf->dtb_errors; 16064 desc.dtbd_oldest = buf->dtb_xamot_offset; 16065 16066 mutex_exit(&dtrace_lock); 16067 16068 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16069 return (EFAULT); 16070 16071 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16072 16073 return (0); 16074 } 16075 16076 if (buf->dtb_tomax == NULL) { 16077 ASSERT(buf->dtb_xamot == NULL); 16078 mutex_exit(&dtrace_lock); 16079 return (ENOENT); 16080 } 16081 16082 cached = buf->dtb_tomax; 16083 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16084 16085 dtrace_xcall(desc.dtbd_cpu, 16086 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16087 16088 state->dts_errors += buf->dtb_xamot_errors; 16089 16090 /* 16091 * If the buffers did not actually switch, then the cross call 16092 * did not take place -- presumably because the given CPU is 16093 * not in the ready set. If this is the case, we'll return 16094 * ENOENT. 16095 */ 16096 if (buf->dtb_tomax == cached) { 16097 ASSERT(buf->dtb_xamot != cached); 16098 mutex_exit(&dtrace_lock); 16099 return (ENOENT); 16100 } 16101 16102 ASSERT(cached == buf->dtb_xamot); 16103 16104 /* 16105 * We have our snapshot; now copy it out. 16106 */ 16107 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16108 buf->dtb_xamot_offset) != 0) { 16109 mutex_exit(&dtrace_lock); 16110 return (EFAULT); 16111 } 16112 16113 desc.dtbd_size = buf->dtb_xamot_offset; 16114 desc.dtbd_drops = buf->dtb_xamot_drops; 16115 desc.dtbd_errors = buf->dtb_xamot_errors; 16116 desc.dtbd_oldest = 0; 16117 16118 mutex_exit(&dtrace_lock); 16119 16120 /* 16121 * Finally, copy out the buffer description. 16122 */ 16123 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16124 return (EFAULT); 16125 16126 return (0); 16127 } 16128 16129 case DTRACEIOC_CONF: { 16130 dtrace_conf_t conf; 16131 16132 bzero(&conf, sizeof (conf)); 16133 conf.dtc_difversion = DIF_VERSION; 16134 conf.dtc_difintregs = DIF_DIR_NREGS; 16135 conf.dtc_diftupregs = DIF_DTR_NREGS; 16136 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16137 16138 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16139 return (EFAULT); 16140 16141 return (0); 16142 } 16143 16144 case DTRACEIOC_STATUS: { 16145 dtrace_status_t stat; 16146 dtrace_dstate_t *dstate; 16147 int i, j; 16148 uint64_t nerrs; 16149 16150 /* 16151 * See the comment in dtrace_state_deadman() for the reason 16152 * for setting dts_laststatus to INT64_MAX before setting 16153 * it to the correct value. 
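 * (Briefly: the intermediate INT64_MAX, separated from the real
 * timestamp by the producer barrier below, prevents the deadman from
 * ever reading a torn or stale value and wrongly concluding that this
 * consumer has wedged.)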
	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
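	/*
	 * Because DTRACEIOC_STATUS refreshes dts_laststatus, it doubles as
	 * the consumer's heartbeat:  the deadman uses that timestamp to
	 * decide whether the consumer is still alive.  A hedged polling
	 * sketch (the interval and names are illustrative):
	 *
	 *	dtrace_status_t st;
	 *	for (;;) {
	 *		if (ioctl(fd, DTRACEIOC_STATUS, &st) == -1)
	 *			break;
	 *		if (st.dtst_exiting || st.dtst_killed)
	 *			break;
	 *		(void) sleep(1);
	 *	}
	 */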
	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
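/*
 * Note on the DTRACEIOC_FORMAT case above:  the ioctl succeeds in both
 * directions -- if dtfd_length is too small, the kernel copies back the
 * descriptor with the required length rather than the string, and the
 * consumer retries with a large enough buffer.  A hedged two-pass sketch
 * (names are illustrative):
 *
 *	dtrace_fmtdesc_t fmt;
 *	bzero(&fmt, sizeof (fmt));
 *	fmt.dtfd_format = format;
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(learns dtfd_length)
 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(fetches the string)
 */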
/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now
		 * inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
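/*
 * Per the comment at the end of dtrace_detach(), any task dispatched via
 * dtrace_taskq must verify that DTrace is still attached before touching
 * framework state.  A hedged sketch of the expected shape of such a task
 * (the specific attachment check shown here is illustrative):
 *
 *	static void
 *	dtrace_some_task(void *arg)
 *	{
 *		mutex_enter(&dtrace_lock);
 *		if (dtrace_devi == NULL) {
 *			mutex_exit(&dtrace_lock);
 *			return;
 *		}
 *		... do the deferred work ...
 *		mutex_exit(&dtrace_lock);
 *	}
 */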
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int,
			    struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
#endif
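/*
 * On FreeBSD versions without on-demand device cloning
 * (__FreeBSD_version >= 800039), a single /dev/dtrace/dtrace node is
 * created at load time from the cdevsw defined below.  A hedged sketch of
 * the expected make_dev() call (the actual code lives in dtrace_load.c):
 *
 *	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL,
 *	    0600, "dtrace/dtrace");
 */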
void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
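/*
 * With the declarations above, dtrace_load()/dtrace_unload() run via the
 * SYSINIT/SYSUNINIT machinery when the module is loaded or unloaded.  A
 * hedged usage note:  the module is typically brought in with
 *
 *	kldload dtrace
 *
 * and the kernel linker resolves the cyclic and opensolaris dependencies
 * declared via MODULE_DEPEND.
 */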