/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace). The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file. The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable. For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable. Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively. Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
#if !defined(sun)
int		dtrace_memstr_max = 4096;
#endif

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax. One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory. While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_try_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc. Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock. (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
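/*
 * Illustrative sketch (not part of the driver): a hypothetical framework
 * path that needed every lock named above would, per the ordering rules
 * in the preceding comment, acquire them as follows.
 */
#if 0
	mutex_enter(&dtrace_meta_lock);		/* outermost */
	mutex_enter(&cpu_lock);			/* between meta and the rest */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* innermost */
#endif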
#if !defined(sun)
/* XXX FreeBSD hacks. */
#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table. This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation. The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation. There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier. This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables. To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables. That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL
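/*
 * Worked example (illustrative only): DT_BSWAP_16(0x1234) expands to
 * ((0x1234 & 0xff) << 8) | ((0x1234 >> 8) & 0xff) == 0x3412, and the
 * wider variants compose this pairwise, so DT_BSWAP_32(0x11223344)
 * yields 0x44332211.
 */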
#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz. We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes. Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)	\
	((testaddr) - (baseaddr) < (basesz) &&			\
	(testaddr) + (testsz) - (baseaddr) <= (basesz) &&	\
	(testaddr) + (testsz) >= (testaddr))

/*
 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it. This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range. Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz)	\
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size -	\
	(mstate)->dtms_scratch_ptr >= (alloc_sz))
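/*
 * Worked edge cases for DTRACE_INRANGE (illustrative only): with
 * baseaddr = 0x1000 and basesz = 0x100, a one-byte test at 0x0fff fails
 * because (0x0fff - 0x1000) underflows to a huge unsigned value; a
 * zero-size test at 0x1100 fails the first clause, since (0x1100 -
 * 0x1000) is not less than basesz; and a zero-size test at 0x10ff
 * passes all three clauses.
 */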
#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context. Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}
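/*
 * A minimal sketch (hypothetical helper, not part of this file) of the
 * rule above: probe-context code walking a kernel structure must fetch
 * each pointer with the safe dtrace_load*() variants rather than by
 * direct dereference.
 */
#if 0
static uintptr_t
dtrace_example_next(uintptr_t elem, size_t next_offs)
{
	/* A faulting load sets CPU_DTRACE_FAULT and yields 0. */
	return (dtrace_loadptr(elem + next_offs));
}
#endif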
/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage. If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors. (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)
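/*
 * For reference, each instantiation above expands to a function such as
 * uint32_t dtrace_load32(uintptr_t addr), matching the prototypes
 * declared earlier in this file; a load from a toxic range sets
 * CPU_DTRACE_BADADDR and returns 0 rather than touching the address.
 */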
static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued. This includes the DTrace scratch areas, and any DTrace variable
 * region. The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable. This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state. For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables. These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}
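/*
 * A minimal sketch (assumed usage, not code from this file) of the
 * DIF_SUBR_* convention described above: a subroutine validates its
 * source before copying and simply bails out on failure, since
 * dtrace_canload() has already set the fault flags and the
 * illegal-value slot.
 */
#if 0
	if (!dtrace_canload(src, size, mstate, vstate))
		return;
#endif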
/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}
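/*
 * Worked example (illustrative): dtrace_strncmp("abc", "abd", 2) returns
 * 0 because only the first two characters are compared before limit is
 * exhausted; with a limit of 3 it returns 'c' - 'd', a negative value.
 */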
/*
 * Compute strlen(s) for a string using safe memory accesses. The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
 * memory specified by the DIF program. The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace. As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered. The src is assumed to
 * be unsafe memory specified by the DIF program. The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type. The src is assumed to be unsafe memory specified by the DIF
 * program. The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
 * unsafe memory specified by the DIF program. The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop. Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b. If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
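/*
 * Illustrative check (not compiled): multiplying factor1 = factor2 =
 * 2^32 + 1 gives hi1 == hi2 == lo1 == lo2 == 1, so product starts as
 * { lo1 * lo2, hi1 * hi2 } == { 1, 1 }, and each shifted cross term
 * adds 1 << 32 to the low word, yielding 2^64 + 2^33 + 1 == (2^32 + 1)^2
 * as expected.
 */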
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
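/*
 * Probe-context consumers of the checks above (e.g. destructive actions
 * such as raise()) are expected to bail out when a check returns 0; the
 * failing check has already set the appropriate CPU_DTRACE_UPRIV or
 * CPU_DTRACE_KPRIV flag for the error path.
 */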
/*
 * Note: not called from probe context. This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar(): if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
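/*
 * A minimal standalone sketch (illustrative only) of the "One-at-a-time"
 * by-value hashing and finalization used by dtrace_dynvar() below, for a
 * single by-value key:
 */
#if 0
static uint64_t
dtrace_example_hashval(uint64_t val)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	int shift;

	/* Mix the key in 16-bit chunks, high chunk first. */
	for (shift = 48; shift >= 0; shift -= 16) {
		hashval += (val >> shift) & 0xffff;
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	/* Finalization, as performed after the key loop below. */
	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	return (hashval);
}
#endif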
/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated. If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm. For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
	 * bit, and seems to have only a minute effect on distribution. For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte. It's painful to do this, but it's much
	 * better than pathological hash distribution. The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values. If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here. If the cycle count becomes
	 * important here, tricks can be pulled to reduce it. (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.) It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}
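	/*
	 * Note on the bucket lock protocol above: dtdh_lock doubles as a
	 * version counter. An odd value means a deallocation holds the
	 * bucket; the CAS from an even value to value + 1 acquires it, and
	 * the increment on release makes the word even again with a new
	 * version. Lookups reread the word to detect that they may have
	 * seen an inconsistent snapshot.
	 */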
top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails: somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted. Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list. However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker. In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it. In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now. Before we return, check that the value
		 * of the lock word hasn't changed. If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable. The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize). We then round the final
	 * size up to the chunksize for allocation.
	 */
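	/*
	 * Worked example (illustrative): for a two-key tuple consisting of
	 * one by-value key (dttk_size == 0) and one 13-byte by-reference
	 * key, ksize below comes to P2ROUNDUP(13, 8) == 16, so the chunk
	 * must accommodate sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t)
	 * + 16 + dsize.
	 */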
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple. Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof. (That is, it must not be able to be fooled by
	 * malicious DIF.) Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU. Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty. We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail: the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.) This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}
1782 */ 1783 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1784 ASSERT(rval == NULL); 1785 goto retry; 1786 } 1787 1788 dvar = free; 1789 new_free = dvar->dtdv_next; 1790 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1791 1792 /* 1793 * We have now allocated a new chunk. We copy the tuple keys into the 1794 * tuple array and copy any referenced key data into the data space 1795 * following the tuple array. As we do this, we relocate dttk_value 1796 * in the final tuple to point to the key data address in the chunk. 1797 */ 1798 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1799 dvar->dtdv_data = (void *)(kdata + ksize); 1800 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1801 1802 for (i = 0; i < nkeys; i++) { 1803 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1804 size_t kesize = key[i].dttk_size; 1805 1806 if (kesize != 0) { 1807 dtrace_bcopy( 1808 (const void *)(uintptr_t)key[i].dttk_value, 1809 (void *)kdata, kesize); 1810 dkey->dttk_value = kdata; 1811 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1812 } else { 1813 dkey->dttk_value = key[i].dttk_value; 1814 } 1815 1816 dkey->dttk_size = kesize; 1817 } 1818 1819 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1820 dvar->dtdv_hashval = hashval; 1821 dvar->dtdv_next = start; 1822 1823 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1824 return (dvar); 1825 1826 /* 1827 * The cas has failed. Either another CPU is adding an element to 1828 * this hash chain, or another CPU is deleting an element from this 1829 * hash chain. The simplest way to deal with both of these cases 1830 * (though not necessarily the most efficient) is to free our 1831 * allocated block and tail-call ourselves. Note that the free is 1832 * to the dirty list and _not_ to the free list. This is to prevent 1833 * races with allocators, above. 
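 *
 * (The race in question is a classic ABA hazard: a CPU mid-pop on the
 * free list may hold a stale dtdv_next; were freed chunks pushed
 * straight back onto the free list, that CPU's compare-and-swap could
 * succeed against a recycled head and corrupt the list.  Routing
 * frees through the dirty list defers reuse until the cleaning path
 * has intervened.)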
1834 */ 1835 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1836 1837 dtrace_membar_producer(); 1838 1839 do { 1840 free = dcpu->dtdsc_dirty; 1841 dvar->dtdv_next = free; 1842 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1843 1844 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1845 } 1846 1847 /*ARGSUSED*/ 1848 static void 1849 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1850 { 1851 if ((int64_t)nval < (int64_t)*oval) 1852 *oval = nval; 1853 } 1854 1855 /*ARGSUSED*/ 1856 static void 1857 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1858 { 1859 if ((int64_t)nval > (int64_t)*oval) 1860 *oval = nval; 1861 } 1862 1863 static void 1864 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1865 { 1866 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1867 int64_t val = (int64_t)nval; 1868 1869 if (val < 0) { 1870 for (i = 0; i < zero; i++) { 1871 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1872 quanta[i] += incr; 1873 return; 1874 } 1875 } 1876 } else { 1877 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1878 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1879 quanta[i - 1] += incr; 1880 return; 1881 } 1882 } 1883 1884 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1885 return; 1886 } 1887 1888 ASSERT(0); 1889 } 1890 1891 static void 1892 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1893 { 1894 uint64_t arg = *lquanta++; 1895 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1896 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1897 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1898 int32_t val = (int32_t)nval, level; 1899 1900 ASSERT(step != 0); 1901 ASSERT(levels != 0); 1902 1903 if (val < base) { 1904 /* 1905 * This is an underflow. 1906 */ 1907 lquanta[0] += incr; 1908 return; 1909 } 1910 1911 level = (val - base) / step; 1912 1913 if (level < levels) { 1914 lquanta[level + 1] += incr; 1915 return; 1916 } 1917 1918 /* 1919 * This is an overflow. 1920 */ 1921 lquanta[levels + 1] += incr; 1922 } 1923 1924 static int 1925 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1926 uint16_t high, uint16_t nsteps, int64_t value) 1927 { 1928 int64_t this = 1, last, next; 1929 int base = 1, order; 1930 1931 ASSERT(factor <= nsteps); 1932 ASSERT(nsteps % factor == 0); 1933 1934 for (order = 0; order < low; order++) 1935 this *= factor; 1936 1937 /* 1938 * If our value is less than our factor taken to the power of the 1939 * low order of magnitude, it goes into the zeroth bucket. 1940 */ 1941 if (value < (last = this)) 1942 return (0); 1943 1944 for (this *= factor; order <= high; order++) { 1945 int nbuckets = this > nsteps ? nsteps : this; 1946 1947 if ((next = this * factor) < this) { 1948 /* 1949 * We should not generally get log/linear quantizations 1950 * with a high magnitude that allows 64-bits to 1951 * overflow, but we nonetheless protect against this 1952 * by explicitly checking for overflow, and clamping 1953 * our value accordingly. 1954 */ 1955 value = this - 1; 1956 } 1957 1958 if (value < this) { 1959 /* 1960 * If our value lies within this order of magnitude, 1961 * determine its position by taking the offset within 1962 * the order of magnitude, dividing by the bucket 1963 * width, and adding to our (accumulated) base. 
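 *
 * As a worked example (with illustrative parameters): for a factor of
 * 10, low of 0, high of 2, nsteps of 10 and a value of 37, the first
 * order of magnitude [1, 10) contributes nine buckets and leaves base
 * at 10; 37 then falls within the order [10, 100), whose bucket width
 * is 100 / 10 = 10, so the value lands in bucket 10 + (37 - 10) / 10,
 * i.e. bucket 12.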
1964 */ 1965 return (base + (value - last) / (this / nbuckets)); 1966 } 1967 1968 base += nbuckets - (nbuckets / factor); 1969 last = this; 1970 this = next; 1971 } 1972 1973 /* 1974 * Our value is greater than or equal to our factor taken to the 1975 * power of one plus the high magnitude -- return the top bucket. 1976 */ 1977 return (base); 1978 } 1979 1980 static void 1981 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1982 { 1983 uint64_t arg = *llquanta++; 1984 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1985 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1986 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1987 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1988 1989 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1990 low, high, nsteps, nval)] += incr; 1991 } 1992 1993 /*ARGSUSED*/ 1994 static void 1995 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1996 { 1997 data[0]++; 1998 data[1] += nval; 1999 } 2000 2001 /*ARGSUSED*/ 2002 static void 2003 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2004 { 2005 int64_t snval = (int64_t)nval; 2006 uint64_t tmp[2]; 2007 2008 data[0]++; 2009 data[1] += nval; 2010 2011 /* 2012 * What we want to say here is: 2013 * 2014 * data[2] += nval * nval; 2015 * 2016 * But given that nval is 64-bit, we could easily overflow, so 2017 * we do this as 128-bit arithmetic. 2018 */ 2019 if (snval < 0) 2020 snval = -snval; 2021 2022 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2023 dtrace_add_128(data + 2, tmp, data + 2); 2024 } 2025 2026 /*ARGSUSED*/ 2027 static void 2028 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2029 { 2030 *oval = *oval + 1; 2031 } 2032 2033 /*ARGSUSED*/ 2034 static void 2035 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2036 { 2037 *oval += nval; 2038 } 2039 2040 /* 2041 * Aggregate given the tuple in the principal data buffer, and the aggregating 2042 * action denoted by the specified dtrace_aggregation_t. The aggregation 2043 * buffer is specified as the buf parameter. This routine does not return 2044 * failure; if there is no space in the aggregation buffer, the data will be 2045 * dropped, and a corresponding counter incremented. 2046 */ 2047 static void 2048 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2049 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2050 { 2051 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2052 uint32_t i, ndx, size, fsize; 2053 uint32_t align = sizeof (uint64_t) - 1; 2054 dtrace_aggbuffer_t *agb; 2055 dtrace_aggkey_t *key; 2056 uint32_t hashval = 0, limit, isstr; 2057 caddr_t tomax, data, kdata; 2058 dtrace_actkind_t action; 2059 dtrace_action_t *act; 2060 uintptr_t offs; 2061 2062 if (buf == NULL) 2063 return; 2064 2065 if (!agg->dtag_hasarg) { 2066 /* 2067 * Currently, only quantize() and lquantize() take additional 2068 * arguments, and they have the same semantics: an increment 2069 * value that defaults to 1 when not present. If additional 2070 * aggregating actions take arguments, the setting of the 2071 * default argument value will presumably have to become more 2072 * sophisticated... 
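 *
 * In D, for example, "@ = quantize(arg0, 3)" bumps the matching
 * bucket by three on each firing, while "@ = quantize(arg0)" relies
 * on the default increment of 1 established here.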
2073 */ 2074 arg = 1; 2075 } 2076 2077 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2078 size = rec->dtrd_offset - agg->dtag_base; 2079 fsize = size + rec->dtrd_size; 2080 2081 ASSERT(dbuf->dtb_tomax != NULL); 2082 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2083 2084 if ((tomax = buf->dtb_tomax) == NULL) { 2085 dtrace_buffer_drop(buf); 2086 return; 2087 } 2088 2089 /* 2090 * The metastructure is always at the bottom of the buffer. 2091 */ 2092 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2093 sizeof (dtrace_aggbuffer_t)); 2094 2095 if (buf->dtb_offset == 0) { 2096 /* 2097 * We just kludge up approximately 1/8th of the size to be 2098 * buckets. If this guess ends up being routinely 2099 * off-the-mark, we may need to dynamically readjust this 2100 * based on past performance. 2101 */ 2102 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2103 2104 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2105 (uintptr_t)tomax || hashsize == 0) { 2106 /* 2107 * We've been given a ludicrously small buffer; 2108 * increment our drop count and leave. 2109 */ 2110 dtrace_buffer_drop(buf); 2111 return; 2112 } 2113 2114 /* 2115 * And now, a pathetic attempt to try to get an odd (or 2116 * perchance, a prime) hash size for better hash distribution. 2117 */ 2118 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2119 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2120 2121 agb->dtagb_hashsize = hashsize; 2122 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2123 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2124 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2125 2126 for (i = 0; i < agb->dtagb_hashsize; i++) 2127 agb->dtagb_hash[i] = NULL; 2128 } 2129 2130 ASSERT(agg->dtag_first != NULL); 2131 ASSERT(agg->dtag_first->dta_intuple); 2132 2133 /* 2134 * Calculate the hash value based on the key. Note that we _don't_ 2135 * include the aggid in the hashing (but we will store it as part of 2136 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2137 * algorithm: a simple, quick algorithm that has no known funnels, and 2138 * gets good distribution in practice. The efficacy of the hashing 2139 * algorithm (and a comparison with other algorithms) may be found by 2140 * running the ::dtrace_aggstat MDB dcmd. 2141 */ 2142 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2143 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2144 limit = i + act->dta_rec.dtrd_size; 2145 ASSERT(limit <= size); 2146 isstr = DTRACEACT_ISSTRING(act); 2147 2148 for (; i < limit; i++) { 2149 hashval += data[i]; 2150 hashval += (hashval << 10); 2151 hashval ^= (hashval >> 6); 2152 2153 if (isstr && data[i] == '\0') 2154 break; 2155 } 2156 } 2157 2158 hashval += (hashval << 3); 2159 hashval ^= (hashval >> 11); 2160 hashval += (hashval << 15); 2161 2162 /* 2163 * Yes, the divide here is expensive -- but it's generally the least 2164 * of the performance issues given the amount of data that we iterate 2165 * over to compute hash values, compare data, etc.
2166 */ 2167 ndx = hashval % agb->dtagb_hashsize; 2168 2169 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2170 ASSERT((caddr_t)key >= tomax); 2171 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2172 2173 if (hashval != key->dtak_hashval || key->dtak_size != size) 2174 continue; 2175 2176 kdata = key->dtak_data; 2177 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2178 2179 for (act = agg->dtag_first; act->dta_intuple; 2180 act = act->dta_next) { 2181 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2182 limit = i + act->dta_rec.dtrd_size; 2183 ASSERT(limit <= size); 2184 isstr = DTRACEACT_ISSTRING(act); 2185 2186 for (; i < limit; i++) { 2187 if (kdata[i] != data[i]) 2188 goto next; 2189 2190 if (isstr && data[i] == '\0') 2191 break; 2192 } 2193 } 2194 2195 if (action != key->dtak_action) { 2196 /* 2197 * We are aggregating on the same value in the same 2198 * aggregation with two different aggregating actions. 2199 * (This should have been picked up in the compiler, 2200 * so we may be dealing with errant or devious DIF.) 2201 * This is an error condition; we indicate as much, 2202 * and return. 2203 */ 2204 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2205 return; 2206 } 2207 2208 /* 2209 * This is a hit: we need to apply the aggregator to 2210 * the value at this key. 2211 */ 2212 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2213 return; 2214 next: 2215 continue; 2216 } 2217 2218 /* 2219 * We didn't find it. We need to allocate some zero-filled space, 2220 * link it into the hash table appropriately, and apply the aggregator 2221 * to the (zero-filled) value. 2222 */ 2223 offs = buf->dtb_offset; 2224 while (offs & (align - 1)) 2225 offs += sizeof (uint32_t); 2226 2227 /* 2228 * If we don't have enough room to both allocate a new key _and_ 2229 * its associated data, increment the drop count and return. 2230 */ 2231 if ((uintptr_t)tomax + offs + fsize > 2232 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2233 dtrace_buffer_drop(buf); 2234 return; 2235 } 2236 2237 /*CONSTCOND*/ 2238 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2239 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2240 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2241 2242 key->dtak_data = kdata = tomax + offs; 2243 buf->dtb_offset = offs + fsize; 2244 2245 /* 2246 * Now copy the data across. 2247 */ 2248 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2249 2250 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2251 kdata[i] = data[i]; 2252 2253 /* 2254 * Because strings are not zeroed out by default, we need to iterate 2255 * looking for actions that store strings, and we need to explicitly 2256 * pad these strings out with zeroes. 2257 */ 2258 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2259 int nul; 2260 2261 if (!DTRACEACT_ISSTRING(act)) 2262 continue; 2263 2264 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2265 limit = i + act->dta_rec.dtrd_size; 2266 ASSERT(limit <= size); 2267 2268 for (nul = 0; i < limit; i++) { 2269 if (nul) { 2270 kdata[i] = '\0'; 2271 continue; 2272 } 2273 2274 if (data[i] != '\0') 2275 continue; 2276 2277 nul = 1; 2278 } 2279 } 2280 2281 for (i = size; i < fsize; i++) 2282 kdata[i] = 0; 2283 2284 key->dtak_hashval = hashval; 2285 key->dtak_size = size; 2286 key->dtak_action = action; 2287 key->dtak_next = agb->dtagb_hash[ndx]; 2288 agb->dtagb_hash[ndx] = key; 2289 2290 /* 2291 * Finally, apply the aggregator. 
2292 */ 2293 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2294 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2295 } 2296 2297 /* 2298 * Given consumer state, this routine finds a speculation in the INACTIVE 2299 * state and transitions it into the ACTIVE state. If there is no speculation 2300 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2301 * incremented -- it is up to the caller to take appropriate action. 2302 */ 2303 static int 2304 dtrace_speculation(dtrace_state_t *state) 2305 { 2306 int i = 0; 2307 dtrace_speculation_state_t current; 2308 uint32_t *stat = &state->dts_speculations_unavail, count; 2309 2310 while (i < state->dts_nspeculations) { 2311 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2312 2313 current = spec->dtsp_state; 2314 2315 if (current != DTRACESPEC_INACTIVE) { 2316 if (current == DTRACESPEC_COMMITTINGMANY || 2317 current == DTRACESPEC_COMMITTING || 2318 current == DTRACESPEC_DISCARDING) 2319 stat = &state->dts_speculations_busy; 2320 i++; 2321 continue; 2322 } 2323 2324 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2325 current, DTRACESPEC_ACTIVE) == current) 2326 return (i + 1); 2327 } 2328 2329 /* 2330 * We couldn't find a speculation. If we found as much as a single 2331 * busy speculation buffer, we'll attribute this failure as "busy" 2332 * instead of "unavail". 2333 */ 2334 do { 2335 count = *stat; 2336 } while (dtrace_cas32(stat, count, count + 1) != count); 2337 2338 return (0); 2339 } 2340 2341 /* 2342 * This routine commits an active speculation. If the specified speculation 2343 * is not in a valid state to perform a commit(), this routine will silently do 2344 * nothing. The state of the specified speculation is transitioned according 2345 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2346 */ 2347 static void 2348 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2349 dtrace_specid_t which) 2350 { 2351 dtrace_speculation_t *spec; 2352 dtrace_buffer_t *src, *dest; 2353 uintptr_t daddr, saddr, dlimit, slimit; 2354 dtrace_speculation_state_t current, new = 0; 2355 intptr_t offs; 2356 uint64_t timestamp; 2357 2358 if (which == 0) 2359 return; 2360 2361 if (which > state->dts_nspeculations) { 2362 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2363 return; 2364 } 2365 2366 spec = &state->dts_speculations[which - 1]; 2367 src = &spec->dtsp_buffer[cpu]; 2368 dest = &state->dts_buffer[cpu]; 2369 2370 do { 2371 current = spec->dtsp_state; 2372 2373 if (current == DTRACESPEC_COMMITTINGMANY) 2374 break; 2375 2376 switch (current) { 2377 case DTRACESPEC_INACTIVE: 2378 case DTRACESPEC_DISCARDING: 2379 return; 2380 2381 case DTRACESPEC_COMMITTING: 2382 /* 2383 * This is only possible if we are (a) commit()'ing 2384 * without having done a prior speculate() on this CPU 2385 * and (b) racing with another commit() on a different 2386 * CPU. There's nothing to do -- we just assert that 2387 * our offset is 0. 2388 */ 2389 ASSERT(src->dtb_offset == 0); 2390 return; 2391 2392 case DTRACESPEC_ACTIVE: 2393 new = DTRACESPEC_COMMITTING; 2394 break; 2395 2396 case DTRACESPEC_ACTIVEONE: 2397 /* 2398 * This speculation is active on one CPU. If our 2399 * buffer offset is non-zero, we know that the one CPU 2400 * must be us. Otherwise, we are committing on a 2401 * different CPU from the speculate(), and we must 2402 * rely on being asynchronously cleaned. 
2403 */ 2404 if (src->dtb_offset != 0) { 2405 new = DTRACESPEC_COMMITTING; 2406 break; 2407 } 2408 /*FALLTHROUGH*/ 2409 2410 case DTRACESPEC_ACTIVEMANY: 2411 new = DTRACESPEC_COMMITTINGMANY; 2412 break; 2413 2414 default: 2415 ASSERT(0); 2416 } 2417 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2418 current, new) != current); 2419 2420 /* 2421 * We have set the state to indicate that we are committing this 2422 * speculation. Now reserve the necessary space in the destination 2423 * buffer. 2424 */ 2425 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2426 sizeof (uint64_t), state, NULL)) < 0) { 2427 dtrace_buffer_drop(dest); 2428 goto out; 2429 } 2430 2431 /* 2432 * We have sufficient space to copy the speculative buffer into the 2433 * primary buffer. First, modify the speculative buffer, filling 2434 * in the timestamp of all entries with the current time. The data 2435 * must have the commit() time rather than the time it was traced, 2436 * so that all entries in the primary buffer are in timestamp order. 2437 */ 2438 timestamp = dtrace_gethrtime(); 2439 saddr = (uintptr_t)src->dtb_tomax; 2440 slimit = saddr + src->dtb_offset; 2441 while (saddr < slimit) { 2442 size_t size; 2443 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2444 2445 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2446 saddr += sizeof (dtrace_epid_t); 2447 continue; 2448 } 2449 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2450 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2451 2452 ASSERT3U(saddr + size, <=, slimit); 2453 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2454 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2455 2456 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2457 2458 saddr += size; 2459 } 2460 2461 /* 2462 * Copy the buffer across. (Note that this is a 2463 * highly suboptimal bcopy(); in the unlikely event that this becomes 2464 * a serious performance issue, a high-performance DTrace-specific 2465 * bcopy() should obviously be invented.) 2466 */ 2467 daddr = (uintptr_t)dest->dtb_tomax + offs; 2468 dlimit = daddr + src->dtb_offset; 2469 saddr = (uintptr_t)src->dtb_tomax; 2470 2471 /* 2472 * First, the aligned portion. 2473 */ 2474 while (dlimit - daddr >= sizeof (uint64_t)) { 2475 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2476 2477 daddr += sizeof (uint64_t); 2478 saddr += sizeof (uint64_t); 2479 } 2480 2481 /* 2482 * Now any left-over bit... 2483 */ 2484 while (dlimit - daddr) 2485 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2486 2487 /* 2488 * Finally, commit the reserved space in the destination buffer. 2489 */ 2490 dest->dtb_offset = offs + src->dtb_offset; 2491 2492 out: 2493 /* 2494 * If we're lucky enough to be the only active CPU on this speculation 2495 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2496 */ 2497 if (current == DTRACESPEC_ACTIVE || 2498 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2499 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2500 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2501 2502 ASSERT(rval == DTRACESPEC_COMMITTING); 2503 } 2504 2505 src->dtb_offset = 0; 2506 src->dtb_xamot_drops += src->dtb_drops; 2507 src->dtb_drops = 0; 2508 } 2509 2510 /* 2511 * This routine discards an active speculation. If the specified speculation 2512 * is not in a valid state to perform a discard(), this routine will silently 2513 * do nothing.
The state of the specified speculation is transitioned 2514 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2515 */ 2516 static void 2517 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2518 dtrace_specid_t which) 2519 { 2520 dtrace_speculation_t *spec; 2521 dtrace_speculation_state_t current, new = 0; 2522 dtrace_buffer_t *buf; 2523 2524 if (which == 0) 2525 return; 2526 2527 if (which > state->dts_nspeculations) { 2528 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2529 return; 2530 } 2531 2532 spec = &state->dts_speculations[which - 1]; 2533 buf = &spec->dtsp_buffer[cpu]; 2534 2535 do { 2536 current = spec->dtsp_state; 2537 2538 switch (current) { 2539 case DTRACESPEC_INACTIVE: 2540 case DTRACESPEC_COMMITTINGMANY: 2541 case DTRACESPEC_COMMITTING: 2542 case DTRACESPEC_DISCARDING: 2543 return; 2544 2545 case DTRACESPEC_ACTIVE: 2546 case DTRACESPEC_ACTIVEMANY: 2547 new = DTRACESPEC_DISCARDING; 2548 break; 2549 2550 case DTRACESPEC_ACTIVEONE: 2551 if (buf->dtb_offset != 0) { 2552 new = DTRACESPEC_INACTIVE; 2553 } else { 2554 new = DTRACESPEC_DISCARDING; 2555 } 2556 break; 2557 2558 default: 2559 ASSERT(0); 2560 } 2561 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2562 current, new) != current); 2563 2564 buf->dtb_offset = 0; 2565 buf->dtb_drops = 0; 2566 } 2567 2568 /* 2569 * Note: not called from probe context. This function is called 2570 * asynchronously from cross call context to clean any speculations that are 2571 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2572 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2573 * speculation. 2574 */ 2575 static void 2576 dtrace_speculation_clean_here(dtrace_state_t *state) 2577 { 2578 dtrace_icookie_t cookie; 2579 processorid_t cpu = curcpu; 2580 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2581 dtrace_specid_t i; 2582 2583 cookie = dtrace_interrupt_disable(); 2584 2585 if (dest->dtb_tomax == NULL) { 2586 dtrace_interrupt_enable(cookie); 2587 return; 2588 } 2589 2590 for (i = 0; i < state->dts_nspeculations; i++) { 2591 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2592 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2593 2594 if (src->dtb_tomax == NULL) 2595 continue; 2596 2597 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2598 src->dtb_offset = 0; 2599 continue; 2600 } 2601 2602 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2603 continue; 2604 2605 if (src->dtb_offset == 0) 2606 continue; 2607 2608 dtrace_speculation_commit(state, cpu, i + 1); 2609 } 2610 2611 dtrace_interrupt_enable(cookie); 2612 } 2613 2614 /* 2615 * Note: not called from probe context. This function is called 2616 * asynchronously (and at a regular interval) to clean any speculations that 2617 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2618 * is work to be done, it cross calls all CPUs to perform that work; 2619 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2620 * INACTIVE state until they have been cleaned by all CPUs.
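 *
 * For reference, a summary of the transitions effected by the routines
 * in this file (the authoritative diagram lives in <sys/dtrace_impl.h>):
 * dtrace_speculation() takes a speculation from INACTIVE to ACTIVE; the
 * first speculate() takes it to ACTIVEONE, and a speculate() on a second
 * CPU to ACTIVEMANY; commit() yields COMMITTING (single CPU) or
 * COMMITTINGMANY (multiple CPUs, or a commit() on a CPU other than the
 * speculating one); discard() yields DISCARDING (or INACTIVE directly,
 * for a buffer dirtied only on the discarding CPU); and the cleaning
 * described here returns COMMITTINGMANY and DISCARDING speculations to
 * INACTIVE.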
2621 */ 2622 static void 2623 dtrace_speculation_clean(dtrace_state_t *state) 2624 { 2625 int work = 0, rv; 2626 dtrace_specid_t i; 2627 2628 for (i = 0; i < state->dts_nspeculations; i++) { 2629 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2630 2631 ASSERT(!spec->dtsp_cleaning); 2632 2633 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2634 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2635 continue; 2636 2637 work++; 2638 spec->dtsp_cleaning = 1; 2639 } 2640 2641 if (!work) 2642 return; 2643 2644 dtrace_xcall(DTRACE_CPUALL, 2645 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2646 2647 /* 2648 * We now know that all CPUs have committed or discarded their 2649 * speculation buffers, as appropriate. We can now set the state 2650 * to inactive. 2651 */ 2652 for (i = 0; i < state->dts_nspeculations; i++) { 2653 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2654 dtrace_speculation_state_t current, new; 2655 2656 if (!spec->dtsp_cleaning) 2657 continue; 2658 2659 current = spec->dtsp_state; 2660 ASSERT(current == DTRACESPEC_DISCARDING || 2661 current == DTRACESPEC_COMMITTINGMANY); 2662 2663 new = DTRACESPEC_INACTIVE; 2664 2665 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2666 ASSERT(rv == current); 2667 spec->dtsp_cleaning = 0; 2668 } 2669 } 2670 2671 /* 2672 * Called as part of a speculate() to get the speculative buffer associated 2673 * with a given speculation. Returns NULL if the specified speculation is not 2674 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2675 * the active CPU is not the specified CPU -- the speculation will be 2676 * atomically transitioned into the ACTIVEMANY state. 2677 */ 2678 static dtrace_buffer_t * 2679 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2680 dtrace_specid_t which) 2681 { 2682 dtrace_speculation_t *spec; 2683 dtrace_speculation_state_t current, new = 0; 2684 dtrace_buffer_t *buf; 2685 2686 if (which == 0) 2687 return (NULL); 2688 2689 if (which > state->dts_nspeculations) { 2690 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2691 return (NULL); 2692 } 2693 2694 spec = &state->dts_speculations[which - 1]; 2695 buf = &spec->dtsp_buffer[cpuid]; 2696 2697 do { 2698 current = spec->dtsp_state; 2699 2700 switch (current) { 2701 case DTRACESPEC_INACTIVE: 2702 case DTRACESPEC_COMMITTINGMANY: 2703 case DTRACESPEC_DISCARDING: 2704 return (NULL); 2705 2706 case DTRACESPEC_COMMITTING: 2707 ASSERT(buf->dtb_offset == 0); 2708 return (NULL); 2709 2710 case DTRACESPEC_ACTIVEONE: 2711 /* 2712 * This speculation is currently active on one CPU. 2713 * Check the offset in the buffer; if it's non-zero, 2714 * that CPU must be us (and we leave the state alone). 2715 * If it's zero, assume that we're starting on a new 2716 * CPU -- and change the state to indicate that the 2717 * speculation is active on more than one CPU. 2718 */ 2719 if (buf->dtb_offset != 0) 2720 return (buf); 2721 2722 new = DTRACESPEC_ACTIVEMANY; 2723 break; 2724 2725 case DTRACESPEC_ACTIVEMANY: 2726 return (buf); 2727 2728 case DTRACESPEC_ACTIVE: 2729 new = DTRACESPEC_ACTIVEONE; 2730 break; 2731 2732 default: 2733 ASSERT(0); 2734 } 2735 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2736 current, new) != current); 2737 2738 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2739 return (buf); 2740 } 2741 2742 /* 2743 * Return a string. 
In the event that the user lacks the privilege to access 2744 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2745 * don't fail access checking. 2746 * 2747 * dtrace_dif_variable() uses this routine as a helper for various 2748 * builtin values such as 'execname' and 'probefunc.' 2749 */ 2750 uintptr_t 2751 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2752 dtrace_mstate_t *mstate) 2753 { 2754 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2755 uintptr_t ret; 2756 size_t strsz; 2757 2758 /* 2759 * The easy case: this probe is allowed to read all of memory, so 2760 * we can just return this as a vanilla pointer. 2761 */ 2762 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2763 return (addr); 2764 2765 /* 2766 * This is the tougher case: we copy the string in question from 2767 * kernel memory into scratch memory and return it that way: this 2768 * ensures that we won't trip up when access checking tests the 2769 * BYREF return value. 2770 */ 2771 strsz = dtrace_strlen((char *)addr, size) + 1; 2772 2773 if (mstate->dtms_scratch_ptr + strsz > 2774 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2775 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2776 return (0); 2777 } 2778 2779 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2780 strsz); 2781 ret = mstate->dtms_scratch_ptr; 2782 mstate->dtms_scratch_ptr += strsz; 2783 return (ret); 2784 } 2785 2786 /* 2787 * Return a string from a memory address which is known to have one or 2788 * more concatenated, individually zero-terminated sub-strings. 2789 * In the event that the user lacks the privilege to access 2790 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2791 * don't fail access checking. 2792 * 2793 * dtrace_dif_variable() uses this routine as a helper for various 2794 * builtin values such as 'execargs'. 2795 */ 2796 static uintptr_t 2797 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2798 dtrace_mstate_t *mstate) 2799 { 2800 char *p; 2801 size_t i; 2802 uintptr_t ret; 2803 2804 if (mstate->dtms_scratch_ptr + strsz > 2805 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2806 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2807 return (0); 2808 } 2809 2810 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2811 strsz); 2812 2813 /* Replace sub-string termination characters with a space. */ 2814 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2815 p++, i++) 2816 if (*p == '\0') 2817 *p = ' '; 2818 2819 ret = mstate->dtms_scratch_ptr; 2820 mstate->dtms_scratch_ptr += strsz; 2821 return (ret); 2822 } 2823 2824 /* 2825 * This function implements the DIF emulator's variable lookups. The emulator 2826 * passes a reserved variable identifier and optional built-in array index. 2827 */ 2828 static uint64_t 2829 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2830 uint64_t ndx) 2831 { 2832 /* 2833 * If we're accessing one of the uncached arguments, we'll turn this 2834 * into a reference in the args array.
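 *
 * For example, a reference to arg5 arrives with v equal to
 * DIF_VAR_ARG0 + 5 and is rewritten below into v = DIF_VAR_ARGS with
 * ndx = 5.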
2835 */ 2836 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2837 ndx = v - DIF_VAR_ARG0; 2838 v = DIF_VAR_ARGS; 2839 } 2840 2841 switch (v) { 2842 case DIF_VAR_ARGS: 2843 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2844 if (ndx >= sizeof (mstate->dtms_arg) / 2845 sizeof (mstate->dtms_arg[0])) { 2846 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2847 dtrace_provider_t *pv; 2848 uint64_t val; 2849 2850 pv = mstate->dtms_probe->dtpr_provider; 2851 if (pv->dtpv_pops.dtps_getargval != NULL) 2852 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2853 mstate->dtms_probe->dtpr_id, 2854 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2855 else 2856 val = dtrace_getarg(ndx, aframes); 2857 2858 /* 2859 * This is regrettably required to keep the compiler 2860 * from tail-optimizing the call to dtrace_getarg(). 2861 * The condition always evaluates to true, but the 2862 * compiler has no way of figuring that out a priori. 2863 * (None of this would be necessary if the compiler 2864 * could be relied upon to _always_ tail-optimize 2865 * the call to dtrace_getarg() -- but it can't.) 2866 */ 2867 if (mstate->dtms_probe != NULL) 2868 return (val); 2869 2870 ASSERT(0); 2871 } 2872 2873 return (mstate->dtms_arg[ndx]); 2874 2875 #if defined(sun) 2876 case DIF_VAR_UREGS: { 2877 klwp_t *lwp; 2878 2879 if (!dtrace_priv_proc(state)) 2880 return (0); 2881 2882 if ((lwp = curthread->t_lwp) == NULL) { 2883 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2884 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 2885 return (0); 2886 } 2887 2888 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2889 2890 } 2891 #else 2892 case DIF_VAR_UREGS: { 2893 struct trapframe *tframe; 2894 2895 if (!dtrace_priv_proc(state)) 2896 return (0); 2897 2898 if ((tframe = curthread->td_frame) == NULL) { 2899 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2900 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2901 return (0); 2902 } 2903 2904 return (dtrace_getreg(tframe, ndx)); 2905 } 2906 #endif 2907 2908 case DIF_VAR_CURTHREAD: 2909 if (!dtrace_priv_kernel(state)) 2910 return (0); 2911 return ((uint64_t)(uintptr_t)curthread); 2912 2913 case DIF_VAR_TIMESTAMP: 2914 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2915 mstate->dtms_timestamp = dtrace_gethrtime(); 2916 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2917 } 2918 return (mstate->dtms_timestamp); 2919 2920 case DIF_VAR_VTIMESTAMP: 2921 ASSERT(dtrace_vtime_references != 0); 2922 return (curthread->t_dtrace_vtime); 2923 2924 case DIF_VAR_WALLTIMESTAMP: 2925 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2926 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2927 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2928 } 2929 return (mstate->dtms_walltimestamp); 2930 2931 #if defined(sun) 2932 case DIF_VAR_IPL: 2933 if (!dtrace_priv_kernel(state)) 2934 return (0); 2935 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2936 mstate->dtms_ipl = dtrace_getipl(); 2937 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2938 } 2939 return (mstate->dtms_ipl); 2940 #endif 2941 2942 case DIF_VAR_EPID: 2943 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2944 return (mstate->dtms_epid); 2945 2946 case DIF_VAR_ID: 2947 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2948 return (mstate->dtms_probe->dtpr_id); 2949 2950 case DIF_VAR_STACKDEPTH: 2951 if (!dtrace_priv_kernel(state)) 2952 return (0); 2953 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2954 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2955 2956 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2957 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2958 } 2959 return (mstate->dtms_stackdepth); 2960 2961 case DIF_VAR_USTACKDEPTH: 2962 if (!dtrace_priv_proc(state)) 2963 return (0); 2964 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2965 /* 2966 * See comment in DIF_VAR_PID. 2967 */ 2968 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2969 CPU_ON_INTR(CPU)) { 2970 mstate->dtms_ustackdepth = 0; 2971 } else { 2972 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2973 mstate->dtms_ustackdepth = 2974 dtrace_getustackdepth(); 2975 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2976 } 2977 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2978 } 2979 return (mstate->dtms_ustackdepth); 2980 2981 case DIF_VAR_CALLER: 2982 if (!dtrace_priv_kernel(state)) 2983 return (0); 2984 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2985 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2986 2987 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2988 /* 2989 * If this is an unanchored probe, we are 2990 * required to go through the slow path: 2991 * dtrace_caller() only guarantees correct 2992 * results for anchored probes. 2993 */ 2994 pc_t caller[2] = {0, 0}; 2995 2996 dtrace_getpcstack(caller, 2, aframes, 2997 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2998 mstate->dtms_caller = caller[1]; 2999 } else if ((mstate->dtms_caller = 3000 dtrace_caller(aframes)) == -1) { 3001 /* 3002 * We have failed to do this the quick way; 3003 * we must resort to the slower approach of 3004 * calling dtrace_getpcstack(). 3005 */ 3006 pc_t caller = 0; 3007 3008 dtrace_getpcstack(&caller, 1, aframes, NULL); 3009 mstate->dtms_caller = caller; 3010 } 3011 3012 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3013 } 3014 return (mstate->dtms_caller); 3015 3016 case DIF_VAR_UCALLER: 3017 if (!dtrace_priv_proc(state)) 3018 return (0); 3019 3020 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3021 uint64_t ustack[3]; 3022 3023 /* 3024 * dtrace_getupcstack() fills in the first uint64_t 3025 * with the current PID. The second uint64_t will 3026 * be the program counter at user-level. The third 3027 * uint64_t will contain the caller, which is what 3028 * we're after. 3029 */ 3030 ustack[2] = 0; 3031 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3032 dtrace_getupcstack(ustack, 3); 3033 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3034 mstate->dtms_ucaller = ustack[2]; 3035 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3036 } 3037 3038 return (mstate->dtms_ucaller); 3039 3040 case DIF_VAR_PROBEPROV: 3041 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3042 return (dtrace_dif_varstr( 3043 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3044 state, mstate)); 3045 3046 case DIF_VAR_PROBEMOD: 3047 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3048 return (dtrace_dif_varstr( 3049 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3050 state, mstate)); 3051 3052 case DIF_VAR_PROBEFUNC: 3053 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3054 return (dtrace_dif_varstr( 3055 (uintptr_t)mstate->dtms_probe->dtpr_func, 3056 state, mstate)); 3057 3058 case DIF_VAR_PROBENAME: 3059 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3060 return (dtrace_dif_varstr( 3061 (uintptr_t)mstate->dtms_probe->dtpr_name, 3062 state, mstate)); 3063 3064 case DIF_VAR_PID: 3065 if (!dtrace_priv_proc(state)) 3066 return (0); 3067 3068 #if defined(sun) 3069 /* 3070 * Note that we are assuming that an unanchored probe is 3071 * always due to a high-level interrupt. (And we're assuming 3072 * that there is only a single high level interrupt.) 
3073 */ 3074 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3075 return (pid0.pid_id); 3076 3077 /* 3078 * It is always safe to dereference one's own t_procp pointer: 3079 * it always points to a valid, allocated proc structure. 3080 * Further, it is always safe to dereference the p_pidp member 3081 * of one's own proc structure. (These are truisms because 3082 * threads and processes don't clean up their own state -- 3083 * they leave that task to whomever reaps them.) 3084 */ 3085 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3086 #else 3087 return ((uint64_t)curproc->p_pid); 3088 #endif 3089 3090 case DIF_VAR_PPID: 3091 if (!dtrace_priv_proc(state)) 3092 return (0); 3093 3094 #if defined(sun) 3095 /* 3096 * See comment in DIF_VAR_PID. 3097 */ 3098 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3099 return (pid0.pid_id); 3100 3101 /* 3102 * It is always safe to dereference one's own t_procp pointer: 3103 * it always points to a valid, allocated proc structure. 3104 * (This is true because threads don't clean up their own 3105 * state -- they leave that task to whomever reaps them.) 3106 */ 3107 return ((uint64_t)curthread->t_procp->p_ppid); 3108 #else 3109 return ((uint64_t)curproc->p_pptr->p_pid); 3110 #endif 3111 3112 case DIF_VAR_TID: 3113 #if defined(sun) 3114 /* 3115 * See comment in DIF_VAR_PID. 3116 */ 3117 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3118 return (0); 3119 #endif 3120 3121 return ((uint64_t)curthread->t_tid); 3122 3123 case DIF_VAR_EXECARGS: { 3124 struct pargs *p_args = curthread->td_proc->p_args; 3125 3126 if (p_args == NULL) 3127 return (0); 3128 3129 return (dtrace_dif_varstrz( 3130 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3131 } 3132 3133 case DIF_VAR_EXECNAME: 3134 #if defined(sun) 3135 if (!dtrace_priv_proc(state)) 3136 return (0); 3137 3138 /* 3139 * See comment in DIF_VAR_PID. 3140 */ 3141 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3142 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3143 3144 /* 3145 * It is always safe to dereference one's own t_procp pointer: 3146 * it always points to a valid, allocated proc structure. 3147 * (This is true because threads don't clean up their own 3148 * state -- they leave that task to whomever reaps them.) 3149 */ 3150 return (dtrace_dif_varstr( 3151 (uintptr_t)curthread->t_procp->p_user.u_comm, 3152 state, mstate)); 3153 #else 3154 return (dtrace_dif_varstr( 3155 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3156 #endif 3157 3158 case DIF_VAR_ZONENAME: 3159 #if defined(sun) 3160 if (!dtrace_priv_proc(state)) 3161 return (0); 3162 3163 /* 3164 * See comment in DIF_VAR_PID. 3165 */ 3166 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3167 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3168 3169 /* 3170 * It is always safe to dereference one's own t_procp pointer: 3171 * it always points to a valid, allocated proc structure. 3172 * (This is true because threads don't clean up their own 3173 * state -- they leave that task to whomever reaps them.) 3174 */ 3175 return (dtrace_dif_varstr( 3176 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3177 state, mstate)); 3178 #else 3179 return (0); 3180 #endif 3181 3182 case DIF_VAR_UID: 3183 if (!dtrace_priv_proc(state)) 3184 return (0); 3185 3186 #if defined(sun) 3187 /* 3188 * See comment in DIF_VAR_PID.
3189 */ 3190 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3191 return ((uint64_t)p0.p_cred->cr_uid); 3192 #endif 3193 3194 /* 3195 * It is always safe to dereference one's own t_procp pointer: 3196 * it always points to a valid, allocated proc structure. 3197 * (This is true because threads don't clean up their own 3198 * state -- they leave that task to whomever reaps them.) 3199 * 3200 * Additionally, it is safe to dereference one's own process 3201 * credential, since this is never NULL after process birth. 3202 */ 3203 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3204 3205 case DIF_VAR_GID: 3206 if (!dtrace_priv_proc(state)) 3207 return (0); 3208 3209 #if defined(sun) 3210 /* 3211 * See comment in DIF_VAR_PID. 3212 */ 3213 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3214 return ((uint64_t)p0.p_cred->cr_gid); 3215 #endif 3216 3217 /* 3218 * It is always safe to dereference one's own t_procp pointer: 3219 * it always points to a valid, allocated proc structure. 3220 * (This is true because threads don't clean up their own 3221 * state -- they leave that task to whomever reaps them.) 3222 * 3223 * Additionally, it is safe to dereference one's own process 3224 * credential, since this is never NULL after process birth. 3225 */ 3226 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3227 3228 case DIF_VAR_ERRNO: { 3229 #if defined(sun) 3230 klwp_t *lwp; 3231 if (!dtrace_priv_proc(state)) 3232 return (0); 3233 3234 /* 3235 * See comment in DIF_VAR_PID. 3236 */ 3237 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3238 return (0); 3239 3240 /* 3241 * It is always safe to dereference one's own t_lwp pointer in 3242 * the event that this pointer is non-NULL. (This is true 3243 * because threads and lwps don't clean up their own state -- 3244 * they leave that task to whomever reaps them.) 3245 */ 3246 if ((lwp = curthread->t_lwp) == NULL) 3247 return (0); 3248 3249 return ((uint64_t)lwp->lwp_errno); 3250 #else 3251 return (curthread->td_errno); 3252 #endif 3253 } 3254 #if !defined(sun) 3255 case DIF_VAR_CPU: { 3256 return curcpu; 3257 } 3258 #endif 3259 default: 3260 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3261 return (0); 3262 } 3263 } 3264 3265 /* 3266 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3267 * Notice that we don't bother validating the proper number of arguments or 3268 * their types in the tuple stack. This isn't needed because all argument 3269 * interpretation is safe because of our load safety -- the worst that can 3270 * happen is that a bogus program can obtain bogus results. 
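 *
 * As an illustration: a D expression such as strlen(execname) compiles
 * to DIF that pushes its argument onto the tuple stack and then issues
 * the call opcode naming DIF_SUBR_STRLEN; emulation of that call lands
 * here, with the argument available in tupregs[0] and the result to be
 * deposited in regs[rd].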
3271 */ 3272 static void 3273 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3274 dtrace_key_t *tupregs, int nargs, 3275 dtrace_mstate_t *mstate, dtrace_state_t *state) 3276 { 3277 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3278 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3279 dtrace_vstate_t *vstate = &state->dts_vstate; 3280 3281 #if defined(sun) 3282 union { 3283 mutex_impl_t mi; 3284 uint64_t mx; 3285 } m; 3286 3287 union { 3288 krwlock_t ri; 3289 uintptr_t rw; 3290 } r; 3291 #else 3292 struct thread *lowner; 3293 union { 3294 struct lock_object *li; 3295 uintptr_t lx; 3296 } l; 3297 #endif 3298 3299 switch (subr) { 3300 case DIF_SUBR_RAND: 3301 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3302 break; 3303 3304 #if defined(sun) 3305 case DIF_SUBR_MUTEX_OWNED: 3306 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3307 mstate, vstate)) { 3308 regs[rd] = 0; 3309 break; 3310 } 3311 3312 m.mx = dtrace_load64(tupregs[0].dttk_value); 3313 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3314 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3315 else 3316 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3317 break; 3318 3319 case DIF_SUBR_MUTEX_OWNER: 3320 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3321 mstate, vstate)) { 3322 regs[rd] = 0; 3323 break; 3324 } 3325 3326 m.mx = dtrace_load64(tupregs[0].dttk_value); 3327 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3328 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3329 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3330 else 3331 regs[rd] = 0; 3332 break; 3333 3334 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3335 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3336 mstate, vstate)) { 3337 regs[rd] = 0; 3338 break; 3339 } 3340 3341 m.mx = dtrace_load64(tupregs[0].dttk_value); 3342 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3343 break; 3344 3345 case DIF_SUBR_MUTEX_TYPE_SPIN: 3346 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3347 mstate, vstate)) { 3348 regs[rd] = 0; 3349 break; 3350 } 3351 3352 m.mx = dtrace_load64(tupregs[0].dttk_value); 3353 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3354 break; 3355 3356 case DIF_SUBR_RW_READ_HELD: { 3357 uintptr_t tmp; 3358 3359 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3360 mstate, vstate)) { 3361 regs[rd] = 0; 3362 break; 3363 } 3364 3365 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3366 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3367 break; 3368 } 3369 3370 case DIF_SUBR_RW_WRITE_HELD: 3371 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3372 mstate, vstate)) { 3373 regs[rd] = 0; 3374 break; 3375 } 3376 3377 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3378 regs[rd] = _RW_WRITE_HELD(&r.ri); 3379 break; 3380 3381 case DIF_SUBR_RW_ISWRITER: 3382 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3383 mstate, vstate)) { 3384 regs[rd] = 0; 3385 break; 3386 } 3387 3388 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3389 regs[rd] = _RW_ISWRITER(&r.ri); 3390 break; 3391 3392 #else 3393 case DIF_SUBR_MUTEX_OWNED: 3394 if (!dtrace_canload(tupregs[0].dttk_value, 3395 sizeof (struct lock_object), mstate, vstate)) { 3396 regs[rd] = 0; 3397 break; 3398 } 3399 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3400 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3401 break; 3402 3403 case DIF_SUBR_MUTEX_OWNER: 3404 if (!dtrace_canload(tupregs[0].dttk_value, 3405 sizeof (struct lock_object), mstate, vstate)) { 3406 regs[rd] = 0; 3407 break; 3408 } 3409 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3410 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3411 regs[rd] = (uintptr_t)lowner; 3412 break; 3413 3414 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3415 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3416 mstate, vstate)) { 3417 regs[rd] = 0; 3418 break; 3419 } 3420 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3421 /* XXX - should be only LC_SLEEPABLE? */ 3422 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3423 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3424 break; 3425 3426 case DIF_SUBR_MUTEX_TYPE_SPIN: 3427 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3428 mstate, vstate)) { 3429 regs[rd] = 0; 3430 break; 3431 } 3432 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3433 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3434 break; 3435 3436 case DIF_SUBR_RW_READ_HELD: 3437 case DIF_SUBR_SX_SHARED_HELD: 3438 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3439 mstate, vstate)) { 3440 regs[rd] = 0; 3441 break; 3442 } 3443 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3444 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3445 lowner == NULL; 3446 break; 3447 3448 case DIF_SUBR_RW_WRITE_HELD: 3449 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3450 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3451 mstate, vstate)) { 3452 regs[rd] = 0; 3453 break; 3454 } 3455 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3456 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3457 regs[rd] = (lowner == curthread); 3458 break; 3459 3460 case DIF_SUBR_RW_ISWRITER: 3461 case DIF_SUBR_SX_ISEXCLUSIVE: 3462 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3463 mstate, vstate)) { 3464 regs[rd] = 0; 3465 break; 3466 } 3467 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3468 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3469 lowner != NULL; 3470 break; 3471 #endif /* ! defined(sun) */ 3472 3473 case DIF_SUBR_BCOPY: { 3474 /* 3475 * We need to be sure that the destination is in the scratch 3476 * region -- no other region is allowed. 3477 */ 3478 uintptr_t src = tupregs[0].dttk_value; 3479 uintptr_t dest = tupregs[1].dttk_value; 3480 size_t size = tupregs[2].dttk_value; 3481 3482 if (!dtrace_inscratch(dest, size, mstate)) { 3483 *flags |= CPU_DTRACE_BADADDR; 3484 *illval = regs[rd]; 3485 break; 3486 } 3487 3488 if (!dtrace_canload(src, size, mstate, vstate)) { 3489 regs[rd] = 0; 3490 break; 3491 } 3492 3493 dtrace_bcopy((void *)src, (void *)dest, size); 3494 break; 3495 } 3496 3497 case DIF_SUBR_ALLOCA: 3498 case DIF_SUBR_COPYIN: { 3499 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3500 uint64_t size = 3501 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3502 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3503 3504 /* 3505 * This action doesn't require any credential checks since 3506 * probes will not activate in user contexts to which the 3507 * enabling user does not have permissions. 3508 */ 3509 3510 /* 3511 * Rounding up the user allocation size could have overflowed 3512 * a large, bogus allocation (like -1ULL) to 0. 
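 *
 * As a worked case: a copyin() size of -1ULL when dtms_scratch_ptr
 * sits one byte past an 8-byte boundary gives dest - dtms_scratch_ptr
 * of 7, and scratch_size wraps around to 6; the scratch_size < size
 * comparison below exists to catch exactly this wrap.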
3513 */ 3514 if (scratch_size < size || 3515 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3516 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3517 regs[rd] = 0; 3518 break; 3519 } 3520 3521 if (subr == DIF_SUBR_COPYIN) { 3522 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3523 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3524 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3525 } 3526 3527 mstate->dtms_scratch_ptr += scratch_size; 3528 regs[rd] = dest; 3529 break; 3530 } 3531 3532 case DIF_SUBR_COPYINTO: { 3533 uint64_t size = tupregs[1].dttk_value; 3534 uintptr_t dest = tupregs[2].dttk_value; 3535 3536 /* 3537 * This action doesn't require any credential checks since 3538 * probes will not activate in user contexts to which the 3539 * enabling user does not have permissions. 3540 */ 3541 if (!dtrace_inscratch(dest, size, mstate)) { 3542 *flags |= CPU_DTRACE_BADADDR; 3543 *illval = regs[rd]; 3544 break; 3545 } 3546 3547 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3548 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3549 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3550 break; 3551 } 3552 3553 case DIF_SUBR_COPYINSTR: { 3554 uintptr_t dest = mstate->dtms_scratch_ptr; 3555 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3556 3557 if (nargs > 1 && tupregs[1].dttk_value < size) 3558 size = tupregs[1].dttk_value + 1; 3559 3560 /* 3561 * This action doesn't require any credential checks since 3562 * probes will not activate in user contexts to which the 3563 * enabling user does not have permissions. 3564 */ 3565 if (!DTRACE_INSCRATCH(mstate, size)) { 3566 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3567 regs[rd] = 0; 3568 break; 3569 } 3570 3571 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3572 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3573 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3574 3575 ((char *)dest)[size - 1] = '\0'; 3576 mstate->dtms_scratch_ptr += size; 3577 regs[rd] = dest; 3578 break; 3579 } 3580 3581 #if defined(sun) 3582 case DIF_SUBR_MSGSIZE: 3583 case DIF_SUBR_MSGDSIZE: { 3584 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3585 uintptr_t wptr, rptr; 3586 size_t count = 0; 3587 int cont = 0; 3588 3589 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3590 3591 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3592 vstate)) { 3593 regs[rd] = 0; 3594 break; 3595 } 3596 3597 wptr = dtrace_loadptr(baddr + 3598 offsetof(mblk_t, b_wptr)); 3599 3600 rptr = dtrace_loadptr(baddr + 3601 offsetof(mblk_t, b_rptr)); 3602 3603 if (wptr < rptr) { 3604 *flags |= CPU_DTRACE_BADADDR; 3605 *illval = tupregs[0].dttk_value; 3606 break; 3607 } 3608 3609 daddr = dtrace_loadptr(baddr + 3610 offsetof(mblk_t, b_datap)); 3611 3612 baddr = dtrace_loadptr(baddr + 3613 offsetof(mblk_t, b_cont)); 3614 3615 /* 3616 * We want to guard against denial-of-service here, 3617 * so we're only going to search the list for 3618 * dtrace_msgdsize_max mblks.
3619 */ 3620 if (cont++ > dtrace_msgdsize_max) { 3621 *flags |= CPU_DTRACE_ILLOP; 3622 break; 3623 } 3624 3625 if (subr == DIF_SUBR_MSGDSIZE) { 3626 if (dtrace_load8(daddr + 3627 offsetof(dblk_t, db_type)) != M_DATA) 3628 continue; 3629 } 3630 3631 count += wptr - rptr; 3632 } 3633 3634 if (!(*flags & CPU_DTRACE_FAULT)) 3635 regs[rd] = count; 3636 3637 break; 3638 } 3639 #endif 3640 3641 case DIF_SUBR_PROGENYOF: { 3642 pid_t pid = tupregs[0].dttk_value; 3643 proc_t *p; 3644 int rval = 0; 3645 3646 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3647 3648 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3649 #if defined(sun) 3650 if (p->p_pidp->pid_id == pid) { 3651 #else 3652 if (p->p_pid == pid) { 3653 #endif 3654 rval = 1; 3655 break; 3656 } 3657 } 3658 3659 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3660 3661 regs[rd] = rval; 3662 break; 3663 } 3664 3665 case DIF_SUBR_SPECULATION: 3666 regs[rd] = dtrace_speculation(state); 3667 break; 3668 3669 case DIF_SUBR_COPYOUT: { 3670 uintptr_t kaddr = tupregs[0].dttk_value; 3671 uintptr_t uaddr = tupregs[1].dttk_value; 3672 uint64_t size = tupregs[2].dttk_value; 3673 3674 if (!dtrace_destructive_disallow && 3675 dtrace_priv_proc_control(state) && 3676 !dtrace_istoxic(kaddr, size)) { 3677 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3678 dtrace_copyout(kaddr, uaddr, size, flags); 3679 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3680 } 3681 break; 3682 } 3683 3684 case DIF_SUBR_COPYOUTSTR: { 3685 uintptr_t kaddr = tupregs[0].dttk_value; 3686 uintptr_t uaddr = tupregs[1].dttk_value; 3687 uint64_t size = tupregs[2].dttk_value; 3688 3689 if (!dtrace_destructive_disallow && 3690 dtrace_priv_proc_control(state) && 3691 !dtrace_istoxic(kaddr, size)) { 3692 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3693 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3694 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3695 } 3696 break; 3697 } 3698 3699 case DIF_SUBR_STRLEN: { 3700 size_t sz; 3701 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3702 sz = dtrace_strlen((char *)addr, 3703 state->dts_options[DTRACEOPT_STRSIZE]); 3704 3705 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3706 regs[rd] = 0; 3707 break; 3708 } 3709 3710 regs[rd] = sz; 3711 3712 break; 3713 } 3714 3715 case DIF_SUBR_STRCHR: 3716 case DIF_SUBR_STRRCHR: { 3717 /* 3718 * We're going to iterate over the string looking for the 3719 * specified character. We will iterate until we have reached 3720 * the string length or we have found the character. If this 3721 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3722 * of the specified character instead of the first. 3723 */ 3724 uintptr_t saddr = tupregs[0].dttk_value; 3725 uintptr_t addr = tupregs[0].dttk_value; 3726 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3727 char c, target = (char)tupregs[1].dttk_value; 3728 3729 for (regs[rd] = 0; addr < limit; addr++) { 3730 if ((c = dtrace_load8(addr)) == target) { 3731 regs[rd] = addr; 3732 3733 if (subr == DIF_SUBR_STRCHR) 3734 break; 3735 } 3736 3737 if (c == '\0') 3738 break; 3739 } 3740 3741 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3742 regs[rd] = 0; 3743 break; 3744 } 3745 3746 break; 3747 } 3748 3749 case DIF_SUBR_STRSTR: 3750 case DIF_SUBR_INDEX: 3751 case DIF_SUBR_RINDEX: { 3752 /* 3753 * We're going to iterate over the string looking for the 3754 * specified string. We will iterate until we have reached 3755 * the string length or we have found the string. 
(Yes, this 3756 * is done in the most naive way possible -- but considering 3757 * that the string we're searching for is likely to be 3758 * relatively short, the complexity of Rabin-Karp or similar 3759 * hardly seems merited.) 3760 */ 3761 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3762 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3763 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3764 size_t len = dtrace_strlen(addr, size); 3765 size_t sublen = dtrace_strlen(substr, size); 3766 char *limit = addr + len, *orig = addr; 3767 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3768 int inc = 1; 3769 3770 regs[rd] = notfound; 3771 3772 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3773 regs[rd] = 0; 3774 break; 3775 } 3776 3777 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3778 vstate)) { 3779 regs[rd] = 0; 3780 break; 3781 } 3782 3783 /* 3784 * strstr() and index()/rindex() have similar semantics if 3785 * both strings are the empty string: strstr() returns a 3786 * pointer to the (empty) string, and index() and rindex() 3787 * both return index 0 (regardless of any position argument). 3788 */ 3789 if (sublen == 0 && len == 0) { 3790 if (subr == DIF_SUBR_STRSTR) 3791 regs[rd] = (uintptr_t)addr; 3792 else 3793 regs[rd] = 0; 3794 break; 3795 } 3796 3797 if (subr != DIF_SUBR_STRSTR) { 3798 if (subr == DIF_SUBR_RINDEX) { 3799 limit = orig - 1; 3800 addr += len; 3801 inc = -1; 3802 } 3803 3804 /* 3805 * Both index() and rindex() take an optional position 3806 * argument that denotes the starting position. 3807 */ 3808 if (nargs == 3) { 3809 int64_t pos = (int64_t)tupregs[2].dttk_value; 3810 3811 /* 3812 * If the position argument to index() is 3813 * negative, Perl implicitly clamps it at 3814 * zero. This semantic is a little surprising 3815 * given the special meaning of negative 3816 * positions to similar Perl functions like 3817 * substr(), but it appears to reflect a 3818 * notion that index() can start from a 3819 * negative index and increment its way up to 3820 * the string. Given this notion, Perl's 3821 * rindex() is at least self-consistent in 3822 * that it implicitly clamps positions greater 3823 * than the string length to be the string 3824 * length. Where Perl completely loses 3825 * coherence, however, is when the specified 3826 * substring is the empty string (""). In 3827 * this case, even if the position is 3828 * negative, rindex() returns 0 -- and even if 3829 * the position is greater than the length, 3830 * index() returns the string length. These 3831 * semantics violate the notion that index() 3832 * should never return a value less than the 3833 * specified position and that rindex() should 3834 * never return a value greater than the 3835 * specified position. (One assumes that 3836 * these semantics are artifacts of Perl's 3837 * implementation and not the results of 3838 * deliberate design -- it beggars belief that 3839 * even Larry Wall could desire such oddness.) 3840 * While in the abstract one would wish for 3841 * consistent position semantics across 3842 * substr(), index() and rindex() -- or at the 3843 * very least self-consistent position 3844 * semantics for index() and rindex() -- we 3845 * instead opt to keep with the extant Perl 3846 * semantics, in all their broken glory. (Do 3847 * we have more desire to maintain Perl's 3848 * semantics than Perl does? Probably.) 
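 *
 * To make the emulated semantics concrete, a few expected results
 * (a sketch, following the clamping logic below):
 *
 *	index("foobar", "ob")		=> 2
 *	index("foobar", "", 9)		=> 6 (pos clamped to length)
 *	rindex("foobar", "o")		=> 2
 *	rindex("foobar", "", -2)	=> 0 (negative pos, empty substr)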
3849 */ 3850 if (subr == DIF_SUBR_RINDEX) { 3851 if (pos < 0) { 3852 if (sublen == 0) 3853 regs[rd] = 0; 3854 break; 3855 } 3856 3857 if (pos > len) 3858 pos = len; 3859 } else { 3860 if (pos < 0) 3861 pos = 0; 3862 3863 if (pos >= len) { 3864 if (sublen == 0) 3865 regs[rd] = len; 3866 break; 3867 } 3868 } 3869 3870 addr = orig + pos; 3871 } 3872 } 3873 3874 for (regs[rd] = notfound; addr != limit; addr += inc) { 3875 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3876 if (subr != DIF_SUBR_STRSTR) { 3877 /* 3878 * As D index() and rindex() are 3879 * modeled on Perl (and not on awk), 3880 * we return a zero-based (and not a 3881 * one-based) index. (For you Perl 3882 * weenies: no, we're not going to add 3883 * $[ -- and shouldn't you be at a con 3884 * or something?) 3885 */ 3886 regs[rd] = (uintptr_t)(addr - orig); 3887 break; 3888 } 3889 3890 ASSERT(subr == DIF_SUBR_STRSTR); 3891 regs[rd] = (uintptr_t)addr; 3892 break; 3893 } 3894 } 3895 3896 break; 3897 } 3898 3899 case DIF_SUBR_STRTOK: { 3900 uintptr_t addr = tupregs[0].dttk_value; 3901 uintptr_t tokaddr = tupregs[1].dttk_value; 3902 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3903 uintptr_t limit, toklimit = tokaddr + size; 3904 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3905 char *dest = (char *)mstate->dtms_scratch_ptr; 3906 int i; 3907 3908 /* 3909 * Check both the token buffer and (later) the input buffer, 3910 * since both could be non-scratch addresses. 3911 */ 3912 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3913 regs[rd] = 0; 3914 break; 3915 } 3916 3917 if (!DTRACE_INSCRATCH(mstate, size)) { 3918 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3919 regs[rd] = 0; 3920 break; 3921 } 3922 3923 if (addr == 0) { 3924 /* 3925 * If the address specified is NULL, we use our saved 3926 * strtok pointer from the mstate. Note that this 3927 * means that the saved strtok pointer is _only_ 3928 * valid within multiple enablings of the same probe -- 3929 * it behaves like an implicit clause-local variable. 3930 */ 3931 addr = mstate->dtms_strtok; 3932 } else { 3933 /* 3934 * If the user-specified address is non-NULL we must 3935 * access check it. This is the only time we have 3936 * a chance to do so, since this address may reside 3937 * in the string table of this clause-- future calls 3938 * (when we fetch addr from mstate->dtms_strtok) 3939 * would fail this access check. 3940 */ 3941 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3942 regs[rd] = 0; 3943 break; 3944 } 3945 } 3946 3947 /* 3948 * First, zero the token map, and then process the token 3949 * string -- setting a bit in the map for every character 3950 * found in the token string. 3951 */ 3952 for (i = 0; i < sizeof (tokmap); i++) 3953 tokmap[i] = 0; 3954 3955 for (; tokaddr < toklimit; tokaddr++) { 3956 if ((c = dtrace_load8(tokaddr)) == '\0') 3957 break; 3958 3959 ASSERT((c >> 3) < sizeof (tokmap)); 3960 tokmap[c >> 3] |= (1 << (c & 0x7)); 3961 } 3962 3963 for (limit = addr + size; addr < limit; addr++) { 3964 /* 3965 * We're looking for a character that is _not_ contained 3966 * in the token string. 3967 */ 3968 if ((c = dtrace_load8(addr)) == '\0') 3969 break; 3970 3971 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3972 break; 3973 } 3974 3975 if (c == '\0') { 3976 /* 3977 * We reached the end of the string without finding 3978 * any character that was not in the token string. 3979 * We return NULL in this case, and we set the saved 3980 * address to NULL as well. 
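 * (The saved pointer is what makes the customary D idiom work: an
 * initial strtok(str, tok) call followed by strtok(NULL, tok)
 * continuation calls within the same clause.)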
3981 */ 3982 regs[rd] = 0; 3983 mstate->dtms_strtok = 0; 3984 break; 3985 } 3986 3987 /* 3988 * From here on, we're copying into the destination string. 3989 */ 3990 for (i = 0; addr < limit && i < size - 1; addr++) { 3991 if ((c = dtrace_load8(addr)) == '\0') 3992 break; 3993 3994 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3995 break; 3996 3997 ASSERT(i < size); 3998 dest[i++] = c; 3999 } 4000 4001 ASSERT(i < size); 4002 dest[i] = '\0'; 4003 regs[rd] = (uintptr_t)dest; 4004 mstate->dtms_scratch_ptr += size; 4005 mstate->dtms_strtok = addr; 4006 break; 4007 } 4008 4009 case DIF_SUBR_SUBSTR: { 4010 uintptr_t s = tupregs[0].dttk_value; 4011 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4012 char *d = (char *)mstate->dtms_scratch_ptr; 4013 int64_t index = (int64_t)tupregs[1].dttk_value; 4014 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4015 size_t len = dtrace_strlen((char *)s, size); 4016 int64_t i = 0; 4017 4018 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4019 regs[rd] = 0; 4020 break; 4021 } 4022 4023 if (!DTRACE_INSCRATCH(mstate, size)) { 4024 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4025 regs[rd] = 0; 4026 break; 4027 } 4028 4029 if (nargs <= 2) 4030 remaining = (int64_t)size; 4031 4032 if (index < 0) { 4033 index += len; 4034 4035 if (index < 0 && index + remaining > 0) { 4036 remaining += index; 4037 index = 0; 4038 } 4039 } 4040 4041 if (index >= len || index < 0) { 4042 remaining = 0; 4043 } else if (remaining < 0) { 4044 remaining += len - index; 4045 } else if (index + remaining > size) { 4046 remaining = size - index; 4047 } 4048 4049 for (i = 0; i < remaining; i++) { 4050 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4051 break; 4052 } 4053 4054 d[i] = '\0'; 4055 4056 mstate->dtms_scratch_ptr += size; 4057 regs[rd] = (uintptr_t)d; 4058 break; 4059 } 4060 4061 case DIF_SUBR_TOUPPER: 4062 case DIF_SUBR_TOLOWER: { 4063 uintptr_t s = tupregs[0].dttk_value; 4064 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4065 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4066 size_t len = dtrace_strlen((char *)s, size); 4067 char lower, upper, convert; 4068 int64_t i; 4069 4070 if (subr == DIF_SUBR_TOUPPER) { 4071 lower = 'a'; 4072 upper = 'z'; 4073 convert = 'A'; 4074 } else { 4075 lower = 'A'; 4076 upper = 'Z'; 4077 convert = 'a'; 4078 } 4079 4080 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4081 regs[rd] = 0; 4082 break; 4083 } 4084 4085 if (!DTRACE_INSCRATCH(mstate, size)) { 4086 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4087 regs[rd] = 0; 4088 break; 4089 } 4090 4091 for (i = 0; i < size - 1; i++) { 4092 if ((c = dtrace_load8(s + i)) == '\0') 4093 break; 4094 4095 if (c >= lower && c <= upper) 4096 c = convert + (c - lower); 4097 4098 dest[i] = c; 4099 } 4100 4101 ASSERT(i < size); 4102 dest[i] = '\0'; 4103 regs[rd] = (uintptr_t)dest; 4104 mstate->dtms_scratch_ptr += size; 4105 break; 4106 } 4107 4108 #if defined(sun) 4109 case DIF_SUBR_GETMAJOR: 4110 #ifdef _LP64 4111 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4112 #else 4113 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4114 #endif 4115 break; 4116 4117 case DIF_SUBR_GETMINOR: 4118 #ifdef _LP64 4119 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4120 #else 4121 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4122 #endif 4123 break; 4124 4125 case DIF_SUBR_DDI_PATHNAME: { 4126 /* 4127 * This one is a galactic mess. 
We are going to roughly 4128 * emulate ddi_pathname(), but it's made more complicated 4129 * by the fact that we (a) want to include the minor name and 4130 * (b) must proceed iteratively instead of recursively. 4131 */ 4132 uintptr_t dest = mstate->dtms_scratch_ptr; 4133 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4134 char *start = (char *)dest, *end = start + size - 1; 4135 uintptr_t daddr = tupregs[0].dttk_value; 4136 int64_t minor = (int64_t)tupregs[1].dttk_value; 4137 char *s; 4138 int i, len, depth = 0; 4139 4140 /* 4141 * Due to all the pointer jumping we do and context we must 4142 * rely upon, we just mandate that the user must have kernel 4143 * read privileges to use this routine. 4144 */ 4145 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4146 *flags |= CPU_DTRACE_KPRIV; 4147 *illval = daddr; 4148 regs[rd] = 0; 4149 } 4150 4151 if (!DTRACE_INSCRATCH(mstate, size)) { 4152 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4153 regs[rd] = 0; 4154 break; 4155 } 4156 4157 *end = '\0'; 4158 4159 /* 4160 * We want to have a name for the minor. In order to do this, 4161 * we need to walk the minor list from the devinfo. We want 4162 * to be sure that we don't infinitely walk a circular list, 4163 * so we check for circularity by sending a scout pointer 4164 * ahead two elements for every element that we iterate over; 4165 * if the list is circular, these will ultimately point to the 4166 * same element. You may recognize this little trick as the 4167 * answer to a stupid interview question -- one that always 4168 * seems to be asked by those who had to have it laboriously 4169 * explained to them, and who can't even concisely describe 4170 * the conditions under which one would be forced to resort to 4171 * this technique. Needless to say, those conditions are 4172 * found here -- and probably only here. Is this the only use 4173 * of this infamous trick in shipping, production code? If it 4174 * isn't, it probably should be... 4175 */ 4176 if (minor != -1) { 4177 uintptr_t maddr = dtrace_loadptr(daddr + 4178 offsetof(struct dev_info, devi_minor)); 4179 4180 uintptr_t next = offsetof(struct ddi_minor_data, next); 4181 uintptr_t name = offsetof(struct ddi_minor_data, 4182 d_minor) + offsetof(struct ddi_minor, name); 4183 uintptr_t dev = offsetof(struct ddi_minor_data, 4184 d_minor) + offsetof(struct ddi_minor, dev); 4185 uintptr_t scout; 4186 4187 if (maddr != NULL) 4188 scout = dtrace_loadptr(maddr + next); 4189 4190 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4191 uint64_t m; 4192 #ifdef _LP64 4193 m = dtrace_load64(maddr + dev) & MAXMIN64; 4194 #else 4195 m = dtrace_load32(maddr + dev) & MAXMIN; 4196 #endif 4197 if (m != minor) { 4198 maddr = dtrace_loadptr(maddr + next); 4199 4200 if (scout == NULL) 4201 continue; 4202 4203 scout = dtrace_loadptr(scout + next); 4204 4205 if (scout == NULL) 4206 continue; 4207 4208 scout = dtrace_loadptr(scout + next); 4209 4210 if (scout == NULL) 4211 continue; 4212 4213 if (scout == maddr) { 4214 *flags |= CPU_DTRACE_ILLOP; 4215 break; 4216 } 4217 4218 continue; 4219 } 4220 4221 /* 4222 * We have the minor data. Now we need to 4223 * copy the minor's name into the end of the 4224 * pathname. 
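 * (As with the rest of this routine, the copy runs right to left:
 * the minor name is deposited at the very end of the scratch region
 * as ":<name>", and each "/<node>@<addr>" component is then
 * prepended as the walk proceeds toward the root.)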
4225 */ 4226 s = (char *)dtrace_loadptr(maddr + name); 4227 len = dtrace_strlen(s, size); 4228 4229 if (*flags & CPU_DTRACE_FAULT) 4230 break; 4231 4232 if (len != 0) { 4233 if ((end -= (len + 1)) < start) 4234 break; 4235 4236 *end = ':'; 4237 } 4238 4239 for (i = 1; i <= len; i++) 4240 end[i] = dtrace_load8((uintptr_t)s++); 4241 break; 4242 } 4243 } 4244 4245 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4246 ddi_node_state_t devi_state; 4247 4248 devi_state = dtrace_load32(daddr + 4249 offsetof(struct dev_info, devi_node_state)); 4250 4251 if (*flags & CPU_DTRACE_FAULT) 4252 break; 4253 4254 if (devi_state >= DS_INITIALIZED) { 4255 s = (char *)dtrace_loadptr(daddr + 4256 offsetof(struct dev_info, devi_addr)); 4257 len = dtrace_strlen(s, size); 4258 4259 if (*flags & CPU_DTRACE_FAULT) 4260 break; 4261 4262 if (len != 0) { 4263 if ((end -= (len + 1)) < start) 4264 break; 4265 4266 *end = '@'; 4267 } 4268 4269 for (i = 1; i <= len; i++) 4270 end[i] = dtrace_load8((uintptr_t)s++); 4271 } 4272 4273 /* 4274 * Now for the node name... 4275 */ 4276 s = (char *)dtrace_loadptr(daddr + 4277 offsetof(struct dev_info, devi_node_name)); 4278 4279 daddr = dtrace_loadptr(daddr + 4280 offsetof(struct dev_info, devi_parent)); 4281 4282 /* 4283 * If our parent is NULL (that is, if we're the root 4284 * node), we're going to use the special path 4285 * "devices". 4286 */ 4287 if (daddr == 0) 4288 s = "devices"; 4289 4290 len = dtrace_strlen(s, size); 4291 if (*flags & CPU_DTRACE_FAULT) 4292 break; 4293 4294 if ((end -= (len + 1)) < start) 4295 break; 4296 4297 for (i = 1; i <= len; i++) 4298 end[i] = dtrace_load8((uintptr_t)s++); 4299 *end = '/'; 4300 4301 if (depth++ > dtrace_devdepth_max) { 4302 *flags |= CPU_DTRACE_ILLOP; 4303 break; 4304 } 4305 } 4306 4307 if (end < start) 4308 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4309 4310 if (daddr == 0) { 4311 regs[rd] = (uintptr_t)end; 4312 mstate->dtms_scratch_ptr += size; 4313 } 4314 4315 break; 4316 } 4317 #endif 4318 4319 case DIF_SUBR_STRJOIN: { 4320 char *d = (char *)mstate->dtms_scratch_ptr; 4321 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4322 uintptr_t s1 = tupregs[0].dttk_value; 4323 uintptr_t s2 = tupregs[1].dttk_value; 4324 int i = 0; 4325 4326 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4327 !dtrace_strcanload(s2, size, mstate, vstate)) { 4328 regs[rd] = 0; 4329 break; 4330 } 4331 4332 if (!DTRACE_INSCRATCH(mstate, size)) { 4333 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4334 regs[rd] = 0; 4335 break; 4336 } 4337 4338 for (;;) { 4339 if (i >= size) { 4340 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4341 regs[rd] = 0; 4342 break; 4343 } 4344 4345 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4346 i--; 4347 break; 4348 } 4349 } 4350 4351 for (;;) { 4352 if (i >= size) { 4353 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4354 regs[rd] = 0; 4355 break; 4356 } 4357 4358 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4359 break; 4360 } 4361 4362 if (i < size) { 4363 mstate->dtms_scratch_ptr += i; 4364 regs[rd] = (uintptr_t)d; 4365 } 4366 4367 break; 4368 } 4369 4370 case DIF_SUBR_LLTOSTR: { 4371 int64_t i = (int64_t)tupregs[0].dttk_value; 4372 uint64_t val, digit; 4373 uint64_t size = 65; /* enough room for 2^64 in binary */ 4374 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4375 int base = 10; 4376 4377 if (nargs > 1) { 4378 if ((base = tupregs[1].dttk_value) <= 1 || 4379 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4380 *flags |= CPU_DTRACE_ILLOP; 4381 break; 4382 } 4383 } 4384 4385 val = (base == 10 && i < 0) ? 
i * -1 : i; 4386 4387 if (!DTRACE_INSCRATCH(mstate, size)) { 4388 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4389 regs[rd] = 0; 4390 break; 4391 } 4392 4393 for (*end-- = '\0'; val; val /= base) { 4394 if ((digit = val % base) <= '9' - '0') { 4395 *end-- = '0' + digit; 4396 } else { 4397 *end-- = 'a' + (digit - ('9' - '0') - 1); 4398 } 4399 } 4400 4401 if (i == 0 && base == 16) 4402 *end-- = '0'; 4403 4404 if (base == 16) 4405 *end-- = 'x'; 4406 4407 if (i == 0 || base == 8 || base == 16) 4408 *end-- = '0'; 4409 4410 if (i < 0 && base == 10) 4411 *end-- = '-'; 4412 4413 regs[rd] = (uintptr_t)end + 1; 4414 mstate->dtms_scratch_ptr += size; 4415 break; 4416 } 4417 4418 case DIF_SUBR_HTONS: 4419 case DIF_SUBR_NTOHS: 4420 #if BYTE_ORDER == BIG_ENDIAN 4421 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4422 #else 4423 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4424 #endif 4425 break; 4426 4427 4428 case DIF_SUBR_HTONL: 4429 case DIF_SUBR_NTOHL: 4430 #if BYTE_ORDER == BIG_ENDIAN 4431 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4432 #else 4433 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4434 #endif 4435 break; 4436 4437 4438 case DIF_SUBR_HTONLL: 4439 case DIF_SUBR_NTOHLL: 4440 #if BYTE_ORDER == BIG_ENDIAN 4441 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4442 #else 4443 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4444 #endif 4445 break; 4446 4447 4448 case DIF_SUBR_DIRNAME: 4449 case DIF_SUBR_BASENAME: { 4450 char *dest = (char *)mstate->dtms_scratch_ptr; 4451 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4452 uintptr_t src = tupregs[0].dttk_value; 4453 int i, j, len = dtrace_strlen((char *)src, size); 4454 int lastbase = -1, firstbase = -1, lastdir = -1; 4455 int start, end; 4456 4457 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4458 regs[rd] = 0; 4459 break; 4460 } 4461 4462 if (!DTRACE_INSCRATCH(mstate, size)) { 4463 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4464 regs[rd] = 0; 4465 break; 4466 } 4467 4468 /* 4469 * The basename and dirname for a zero-length string is 4470 * defined to be "." 4471 */ 4472 if (len == 0) { 4473 len = 1; 4474 src = (uintptr_t)"."; 4475 } 4476 4477 /* 4478 * Start from the back of the string, moving back toward the 4479 * front until we see a character that isn't a slash. That 4480 * character is the last character in the basename. 4481 */ 4482 for (i = len - 1; i >= 0; i--) { 4483 if (dtrace_load8(src + i) != '/') 4484 break; 4485 } 4486 4487 if (i >= 0) 4488 lastbase = i; 4489 4490 /* 4491 * Starting from the last character in the basename, move 4492 * towards the front until we find a slash. The character 4493 * that we processed immediately before that is the first 4494 * character in the basename. 4495 */ 4496 for (; i >= 0; i--) { 4497 if (dtrace_load8(src + i) == '/') 4498 break; 4499 } 4500 4501 if (i >= 0) 4502 firstbase = i + 1; 4503 4504 /* 4505 * Now keep going until we find a non-slash character. That 4506 * character is the last character in the dirname. 4507 */ 4508 for (; i >= 0; i--) { 4509 if (dtrace_load8(src + i) != '/') 4510 break; 4511 } 4512 4513 if (i >= 0) 4514 lastdir = i; 4515 4516 ASSERT(!(lastbase == -1 && firstbase != -1)); 4517 ASSERT(!(firstbase == -1 && lastdir != -1)); 4518 4519 if (lastbase == -1) { 4520 /* 4521 * We didn't find a non-slash character. We know that 4522 * the length is non-zero, so the whole string must be 4523 * slashes. In either the dirname or the basename 4524 * case, we return '/'. 
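 *
 * As a quick sketch of the expected results (mirroring the userland
 * routines):
 *
 *	input		dirname()	basename()
 *	"/usr/lib/"	"/usr"		"lib"
 *	"///"		"/"		"/"
 *	"foo"		"."		"foo"
 *	""		"."		"."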
4525 */ 4526 ASSERT(firstbase == -1); 4527 firstbase = lastbase = lastdir = 0; 4528 } 4529 4530 if (firstbase == -1) { 4531 /* 4532 * The entire string consists only of a basename 4533 * component. If we're looking for dirname, we need 4534 * to change our string to be just "."; if we're 4535 * looking for a basename, we'll just set the first 4536 * character of the basename to be 0. 4537 */ 4538 if (subr == DIF_SUBR_DIRNAME) { 4539 ASSERT(lastdir == -1); 4540 src = (uintptr_t)"."; 4541 lastdir = 0; 4542 } else { 4543 firstbase = 0; 4544 } 4545 } 4546 4547 if (subr == DIF_SUBR_DIRNAME) { 4548 if (lastdir == -1) { 4549 /* 4550 * We know that we have a slash in the name -- 4551 * or lastdir would be set to 0, above. And 4552 * because lastdir is -1, we know that this 4553 * slash must be the first character. (That 4554 * is, the full string must be of the form 4555 * "/basename".) In this case, the last 4556 * character of the directory name is 0. 4557 */ 4558 lastdir = 0; 4559 } 4560 4561 start = 0; 4562 end = lastdir; 4563 } else { 4564 ASSERT(subr == DIF_SUBR_BASENAME); 4565 ASSERT(firstbase != -1 && lastbase != -1); 4566 start = firstbase; 4567 end = lastbase; 4568 } 4569 4570 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4571 dest[j] = dtrace_load8(src + i); 4572 4573 dest[j] = '\0'; 4574 regs[rd] = (uintptr_t)dest; 4575 mstate->dtms_scratch_ptr += size; 4576 break; 4577 } 4578 4579 case DIF_SUBR_CLEANPATH: { 4580 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4581 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4582 uintptr_t src = tupregs[0].dttk_value; 4583 int i = 0, j = 0; 4584 4585 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4586 regs[rd] = 0; 4587 break; 4588 } 4589 4590 if (!DTRACE_INSCRATCH(mstate, size)) { 4591 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4592 regs[rd] = 0; 4593 break; 4594 } 4595 4596 /* 4597 * Move forward, loading each character. 4598 */ 4599 do { 4600 c = dtrace_load8(src + i++); 4601 next: 4602 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4603 break; 4604 4605 if (c != '/') { 4606 dest[j++] = c; 4607 continue; 4608 } 4609 4610 c = dtrace_load8(src + i++); 4611 4612 if (c == '/') { 4613 /* 4614 * We have two slashes -- we can just advance 4615 * to the next character. 4616 */ 4617 goto next; 4618 } 4619 4620 if (c != '.') { 4621 /* 4622 * This is not "." and it's not ".." -- we can 4623 * just store the "/" and this character and 4624 * drive on. 4625 */ 4626 dest[j++] = '/'; 4627 dest[j++] = c; 4628 continue; 4629 } 4630 4631 c = dtrace_load8(src + i++); 4632 4633 if (c == '/') { 4634 /* 4635 * This is a "/./" component. We're not going 4636 * to store anything in the destination buffer; 4637 * we're just going to go to the next component. 4638 */ 4639 goto next; 4640 } 4641 4642 if (c != '.') { 4643 /* 4644 * This is not ".." -- we can just store the 4645 * "/." and this character and continue 4646 * processing. 4647 */ 4648 dest[j++] = '/'; 4649 dest[j++] = '.'; 4650 dest[j++] = c; 4651 continue; 4652 } 4653 4654 c = dtrace_load8(src + i++); 4655 4656 if (c != '/' && c != '\0') { 4657 /* 4658 * This is not ".." -- it's "..[mumble]". 4659 * We'll store the "/.." and this character 4660 * and continue processing. 4661 */ 4662 dest[j++] = '/'; 4663 dest[j++] = '.'; 4664 dest[j++] = '.'; 4665 dest[j++] = c; 4666 continue; 4667 } 4668 4669 /* 4670 * This is "/../" or "/..\0". We need to back up 4671 * our destination pointer until we find a "/". 
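 * (For example, the net effect is that "/foo/../bar" collapses to
 * "/bar", while "/foo/./bar//" cleans up to "/foo/bar/".)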
4672 */ 4673 i--; 4674 while (j != 0 && dest[--j] != '/') 4675 continue; 4676 4677 if (c == '\0') 4678 dest[++j] = '/'; 4679 } while (c != '\0'); 4680 4681 dest[j] = '\0'; 4682 regs[rd] = (uintptr_t)dest; 4683 mstate->dtms_scratch_ptr += size; 4684 break; 4685 } 4686 4687 case DIF_SUBR_INET_NTOA: 4688 case DIF_SUBR_INET_NTOA6: 4689 case DIF_SUBR_INET_NTOP: { 4690 size_t size; 4691 int af, argi, i; 4692 char *base, *end; 4693 4694 if (subr == DIF_SUBR_INET_NTOP) { 4695 af = (int)tupregs[0].dttk_value; 4696 argi = 1; 4697 } else { 4698 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4699 argi = 0; 4700 } 4701 4702 if (af == AF_INET) { 4703 ipaddr_t ip4; 4704 uint8_t *ptr8, val; 4705 4706 /* 4707 * Safely load the IPv4 address. 4708 */ 4709 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4710 4711 /* 4712 * Check an IPv4 string will fit in scratch. 4713 */ 4714 size = INET_ADDRSTRLEN; 4715 if (!DTRACE_INSCRATCH(mstate, size)) { 4716 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4717 regs[rd] = 0; 4718 break; 4719 } 4720 base = (char *)mstate->dtms_scratch_ptr; 4721 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4722 4723 /* 4724 * Stringify as a dotted decimal quad. 4725 */ 4726 *end-- = '\0'; 4727 ptr8 = (uint8_t *)&ip4; 4728 for (i = 3; i >= 0; i--) { 4729 val = ptr8[i]; 4730 4731 if (val == 0) { 4732 *end-- = '0'; 4733 } else { 4734 for (; val; val /= 10) { 4735 *end-- = '0' + (val % 10); 4736 } 4737 } 4738 4739 if (i > 0) 4740 *end-- = '.'; 4741 } 4742 ASSERT(end + 1 >= base); 4743 4744 } else if (af == AF_INET6) { 4745 struct in6_addr ip6; 4746 int firstzero, tryzero, numzero, v6end; 4747 uint16_t val; 4748 const char digits[] = "0123456789abcdef"; 4749 4750 /* 4751 * Stringify using RFC 1884 convention 2 - 16 bit 4752 * hexadecimal values with a zero-run compression. 4753 * Lower case hexadecimal digits are used. 4754 * eg, fe80::214:4fff:fe0b:76c8. 4755 * The IPv4 embedded form is returned for inet_ntop, 4756 * just the IPv4 string is returned for inet_ntoa6. 4757 */ 4758 4759 /* 4760 * Safely load the IPv6 address. 4761 */ 4762 dtrace_bcopy( 4763 (void *)(uintptr_t)tupregs[argi].dttk_value, 4764 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4765 4766 /* 4767 * Check an IPv6 string will fit in scratch. 4768 */ 4769 size = INET6_ADDRSTRLEN; 4770 if (!DTRACE_INSCRATCH(mstate, size)) { 4771 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4772 regs[rd] = 0; 4773 break; 4774 } 4775 base = (char *)mstate->dtms_scratch_ptr; 4776 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4777 *end-- = '\0'; 4778 4779 /* 4780 * Find the longest run of 16 bit zero values 4781 * for the single allowed zero compression - "::". 
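 * (In the fe80::214:4fff:fe0b:76c8 example above, the zero run
 * covers bytes 2 through 7 of the address, so this pass would leave
 * firstzero at 2 and numzero at 6.)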
4782 */
4783 firstzero = -1;
4784 tryzero = -1;
4785 numzero = 1;
4786 for (i = 0; i < sizeof (struct in6_addr); i++) {
4787 #if defined(sun)
4788 if (ip6._S6_un._S6_u8[i] == 0 &&
4789 #else
4790 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4791 #endif
4792 tryzero == -1 && i % 2 == 0) {
4793 tryzero = i;
4794 continue;
4795 }
4796
4797 if (tryzero != -1 &&
4798 #if defined(sun)
4799 (ip6._S6_un._S6_u8[i] != 0 ||
4800 #else
4801 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4802 #endif
4803 i == sizeof (struct in6_addr) - 1)) {
4804
4805 if (i - tryzero <= numzero) {
4806 tryzero = -1;
4807 continue;
4808 }
4809
4810 firstzero = tryzero;
4811 numzero = i - i % 2 - tryzero;
4812 tryzero = -1;
4813
4814 #if defined(sun)
4815 if (ip6._S6_un._S6_u8[i] == 0 &&
4816 #else
4817 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4818 #endif
4819 i == sizeof (struct in6_addr) - 1)
4820 numzero += 2;
4821 }
4822 }
4823 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4824
4825 /*
4826 * Check for an IPv4 embedded address.
4827 */
4828 v6end = sizeof (struct in6_addr) - 2;
4829 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4830 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4831 for (i = sizeof (struct in6_addr) - 1;
4832 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4833 ASSERT(end >= base);
4834
4835 #if defined(sun)
4836 val = ip6._S6_un._S6_u8[i];
4837 #else
4838 val = ip6.__u6_addr.__u6_addr8[i];
4839 #endif
4840
4841 if (val == 0) {
4842 *end-- = '0';
4843 } else {
4844 for (; val; val /= 10) {
4845 *end-- = '0' + val % 10;
4846 }
4847 }
4848
4849 if (i > DTRACE_V4MAPPED_OFFSET)
4850 *end-- = '.';
4851 }
4852
4853 if (subr == DIF_SUBR_INET_NTOA6)
4854 goto inetout;
4855
4856 /*
4857 * Set v6end to skip the IPv4 address that
4858 * we have already stringified.
4859 */
4860 v6end = 10;
4861 }
4862
4863 /*
4864 * Build the IPv6 string by working through the
4865 * address in reverse.
4866 */
4867 for (i = v6end; i >= 0; i -= 2) {
4868 ASSERT(end >= base);
4869
4870 if (i == firstzero + numzero - 2) {
4871 *end-- = ':';
4872 *end-- = ':';
4873 i -= numzero - 2;
4874 continue;
4875 }
4876
4877 if (i < 14 && i != firstzero - 2)
4878 *end-- = ':';
4879
4880 #if defined(sun)
4881 val = (ip6._S6_un._S6_u8[i] << 8) +
4882 ip6._S6_un._S6_u8[i + 1];
4883 #else
4884 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4885 ip6.__u6_addr.__u6_addr8[i + 1];
4886 #endif
4887
4888 if (val == 0) {
4889 *end-- = '0';
4890 } else {
4891 for (; val; val /= 16) {
4892 *end-- = digits[val % 16];
4893 }
4894 }
4895 }
4896 ASSERT(end + 1 >= base);
4897
4898 } else {
4899 /*
4900 * The user didn't use AF_INET or AF_INET6.
4901 */ 4902 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4903 regs[rd] = 0; 4904 break; 4905 } 4906 4907 inetout: regs[rd] = (uintptr_t)end + 1; 4908 mstate->dtms_scratch_ptr += size; 4909 break; 4910 } 4911 4912 case DIF_SUBR_MEMREF: { 4913 uintptr_t size = 2 * sizeof(uintptr_t); 4914 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4915 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4916 4917 /* address and length */ 4918 memref[0] = tupregs[0].dttk_value; 4919 memref[1] = tupregs[1].dttk_value; 4920 4921 regs[rd] = (uintptr_t) memref; 4922 mstate->dtms_scratch_ptr += scratch_size; 4923 break; 4924 } 4925 4926 #if !defined(sun) 4927 case DIF_SUBR_MEMSTR: { 4928 char *str = (char *)mstate->dtms_scratch_ptr; 4929 uintptr_t mem = tupregs[0].dttk_value; 4930 char c = tupregs[1].dttk_value; 4931 size_t size = tupregs[2].dttk_value; 4932 uint8_t n; 4933 int i; 4934 4935 regs[rd] = 0; 4936 4937 if (size == 0) 4938 break; 4939 4940 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 4941 break; 4942 4943 if (!DTRACE_INSCRATCH(mstate, size)) { 4944 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4945 break; 4946 } 4947 4948 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 4949 *flags |= CPU_DTRACE_ILLOP; 4950 break; 4951 } 4952 4953 for (i = 0; i < size - 1; i++) { 4954 n = dtrace_load8(mem++); 4955 str[i] = (n == 0) ? c : n; 4956 } 4957 str[size - 1] = 0; 4958 4959 regs[rd] = (uintptr_t)str; 4960 mstate->dtms_scratch_ptr += size; 4961 break; 4962 } 4963 #endif 4964 4965 case DIF_SUBR_TYPEREF: { 4966 uintptr_t size = 4 * sizeof(uintptr_t); 4967 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4968 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4969 4970 /* address, num_elements, type_str, type_len */ 4971 typeref[0] = tupregs[0].dttk_value; 4972 typeref[1] = tupregs[1].dttk_value; 4973 typeref[2] = tupregs[2].dttk_value; 4974 typeref[3] = tupregs[3].dttk_value; 4975 4976 regs[rd] = (uintptr_t) typeref; 4977 mstate->dtms_scratch_ptr += scratch_size; 4978 break; 4979 } 4980 } 4981 } 4982 4983 /* 4984 * Emulate the execution of DTrace IR instructions specified by the given 4985 * DIF object. This function is deliberately void of assertions as all of 4986 * the necessary checks are handled by a call to dtrace_difo_validate(). 4987 */ 4988 static uint64_t 4989 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4990 dtrace_vstate_t *vstate, dtrace_state_t *state) 4991 { 4992 const dif_instr_t *text = difo->dtdo_buf; 4993 const uint_t textlen = difo->dtdo_len; 4994 const char *strtab = difo->dtdo_strtab; 4995 const uint64_t *inttab = difo->dtdo_inttab; 4996 4997 uint64_t rval = 0; 4998 dtrace_statvar_t *svar; 4999 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 5000 dtrace_difv_t *v; 5001 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5002 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 5003 5004 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 5005 uint64_t regs[DIF_DIR_NREGS]; 5006 uint64_t *tmp; 5007 5008 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 5009 int64_t cc_r; 5010 uint_t pc = 0, id, opc = 0; 5011 uint8_t ttop = 0; 5012 dif_instr_t instr; 5013 uint_t r1, r2, rd; 5014 5015 /* 5016 * We stash the current DIF object into the machine state: we need it 5017 * for subsequent access checking. 
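 * (In particular, dtrace_canload() and friends consult dtms_difo so
 * that loads from this DIF object's own string table can be
 * permitted; the strtok() handling above relies on exactly that
 * check.)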
5018 */ 5019 mstate->dtms_difo = difo; 5020 5021 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 5022 5023 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 5024 opc = pc; 5025 5026 instr = text[pc++]; 5027 r1 = DIF_INSTR_R1(instr); 5028 r2 = DIF_INSTR_R2(instr); 5029 rd = DIF_INSTR_RD(instr); 5030 5031 switch (DIF_INSTR_OP(instr)) { 5032 case DIF_OP_OR: 5033 regs[rd] = regs[r1] | regs[r2]; 5034 break; 5035 case DIF_OP_XOR: 5036 regs[rd] = regs[r1] ^ regs[r2]; 5037 break; 5038 case DIF_OP_AND: 5039 regs[rd] = regs[r1] & regs[r2]; 5040 break; 5041 case DIF_OP_SLL: 5042 regs[rd] = regs[r1] << regs[r2]; 5043 break; 5044 case DIF_OP_SRL: 5045 regs[rd] = regs[r1] >> regs[r2]; 5046 break; 5047 case DIF_OP_SUB: 5048 regs[rd] = regs[r1] - regs[r2]; 5049 break; 5050 case DIF_OP_ADD: 5051 regs[rd] = regs[r1] + regs[r2]; 5052 break; 5053 case DIF_OP_MUL: 5054 regs[rd] = regs[r1] * regs[r2]; 5055 break; 5056 case DIF_OP_SDIV: 5057 if (regs[r2] == 0) { 5058 regs[rd] = 0; 5059 *flags |= CPU_DTRACE_DIVZERO; 5060 } else { 5061 regs[rd] = (int64_t)regs[r1] / 5062 (int64_t)regs[r2]; 5063 } 5064 break; 5065 5066 case DIF_OP_UDIV: 5067 if (regs[r2] == 0) { 5068 regs[rd] = 0; 5069 *flags |= CPU_DTRACE_DIVZERO; 5070 } else { 5071 regs[rd] = regs[r1] / regs[r2]; 5072 } 5073 break; 5074 5075 case DIF_OP_SREM: 5076 if (regs[r2] == 0) { 5077 regs[rd] = 0; 5078 *flags |= CPU_DTRACE_DIVZERO; 5079 } else { 5080 regs[rd] = (int64_t)regs[r1] % 5081 (int64_t)regs[r2]; 5082 } 5083 break; 5084 5085 case DIF_OP_UREM: 5086 if (regs[r2] == 0) { 5087 regs[rd] = 0; 5088 *flags |= CPU_DTRACE_DIVZERO; 5089 } else { 5090 regs[rd] = regs[r1] % regs[r2]; 5091 } 5092 break; 5093 5094 case DIF_OP_NOT: 5095 regs[rd] = ~regs[r1]; 5096 break; 5097 case DIF_OP_MOV: 5098 regs[rd] = regs[r1]; 5099 break; 5100 case DIF_OP_CMP: 5101 cc_r = regs[r1] - regs[r2]; 5102 cc_n = cc_r < 0; 5103 cc_z = cc_r == 0; 5104 cc_v = 0; 5105 cc_c = regs[r1] < regs[r2]; 5106 break; 5107 case DIF_OP_TST: 5108 cc_n = cc_v = cc_c = 0; 5109 cc_z = regs[r1] == 0; 5110 break; 5111 case DIF_OP_BA: 5112 pc = DIF_INSTR_LABEL(instr); 5113 break; 5114 case DIF_OP_BE: 5115 if (cc_z) 5116 pc = DIF_INSTR_LABEL(instr); 5117 break; 5118 case DIF_OP_BNE: 5119 if (cc_z == 0) 5120 pc = DIF_INSTR_LABEL(instr); 5121 break; 5122 case DIF_OP_BG: 5123 if ((cc_z | (cc_n ^ cc_v)) == 0) 5124 pc = DIF_INSTR_LABEL(instr); 5125 break; 5126 case DIF_OP_BGU: 5127 if ((cc_c | cc_z) == 0) 5128 pc = DIF_INSTR_LABEL(instr); 5129 break; 5130 case DIF_OP_BGE: 5131 if ((cc_n ^ cc_v) == 0) 5132 pc = DIF_INSTR_LABEL(instr); 5133 break; 5134 case DIF_OP_BGEU: 5135 if (cc_c == 0) 5136 pc = DIF_INSTR_LABEL(instr); 5137 break; 5138 case DIF_OP_BL: 5139 if (cc_n ^ cc_v) 5140 pc = DIF_INSTR_LABEL(instr); 5141 break; 5142 case DIF_OP_BLU: 5143 if (cc_c) 5144 pc = DIF_INSTR_LABEL(instr); 5145 break; 5146 case DIF_OP_BLE: 5147 if (cc_z | (cc_n ^ cc_v)) 5148 pc = DIF_INSTR_LABEL(instr); 5149 break; 5150 case DIF_OP_BLEU: 5151 if (cc_c | cc_z) 5152 pc = DIF_INSTR_LABEL(instr); 5153 break; 5154 case DIF_OP_RLDSB: 5155 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5156 *flags |= CPU_DTRACE_KPRIV; 5157 *illval = regs[r1]; 5158 break; 5159 } 5160 /*FALLTHROUGH*/ 5161 case DIF_OP_LDSB: 5162 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5163 break; 5164 case DIF_OP_RLDSH: 5165 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5166 *flags |= CPU_DTRACE_KPRIV; 5167 *illval = regs[r1]; 5168 break; 5169 } 5170 /*FALLTHROUGH*/ 5171 case DIF_OP_LDSH: 5172 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5173 break; 
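/*
 * The same pattern repeats for the typed loads below: each
 * DIF_OP_RLD* opcode first checks via dtrace_canstore() that the
 * address lies within DTrace-owned memory, raising CPU_DTRACE_KPRIV
 * and recording the offending address otherwise, and then falls
 * through to its DIF_OP_LD* twin for the actual dtrace_load*().
 */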
5174 case DIF_OP_RLDSW: 5175 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5176 *flags |= CPU_DTRACE_KPRIV; 5177 *illval = regs[r1]; 5178 break; 5179 } 5180 /*FALLTHROUGH*/ 5181 case DIF_OP_LDSW: 5182 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5183 break; 5184 case DIF_OP_RLDUB: 5185 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5186 *flags |= CPU_DTRACE_KPRIV; 5187 *illval = regs[r1]; 5188 break; 5189 } 5190 /*FALLTHROUGH*/ 5191 case DIF_OP_LDUB: 5192 regs[rd] = dtrace_load8(regs[r1]); 5193 break; 5194 case DIF_OP_RLDUH: 5195 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5196 *flags |= CPU_DTRACE_KPRIV; 5197 *illval = regs[r1]; 5198 break; 5199 } 5200 /*FALLTHROUGH*/ 5201 case DIF_OP_LDUH: 5202 regs[rd] = dtrace_load16(regs[r1]); 5203 break; 5204 case DIF_OP_RLDUW: 5205 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5206 *flags |= CPU_DTRACE_KPRIV; 5207 *illval = regs[r1]; 5208 break; 5209 } 5210 /*FALLTHROUGH*/ 5211 case DIF_OP_LDUW: 5212 regs[rd] = dtrace_load32(regs[r1]); 5213 break; 5214 case DIF_OP_RLDX: 5215 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5216 *flags |= CPU_DTRACE_KPRIV; 5217 *illval = regs[r1]; 5218 break; 5219 } 5220 /*FALLTHROUGH*/ 5221 case DIF_OP_LDX: 5222 regs[rd] = dtrace_load64(regs[r1]); 5223 break; 5224 case DIF_OP_ULDSB: 5225 regs[rd] = (int8_t) 5226 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5227 break; 5228 case DIF_OP_ULDSH: 5229 regs[rd] = (int16_t) 5230 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5231 break; 5232 case DIF_OP_ULDSW: 5233 regs[rd] = (int32_t) 5234 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5235 break; 5236 case DIF_OP_ULDUB: 5237 regs[rd] = 5238 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5239 break; 5240 case DIF_OP_ULDUH: 5241 regs[rd] = 5242 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5243 break; 5244 case DIF_OP_ULDUW: 5245 regs[rd] = 5246 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5247 break; 5248 case DIF_OP_ULDX: 5249 regs[rd] = 5250 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5251 break; 5252 case DIF_OP_RET: 5253 rval = regs[rd]; 5254 pc = textlen; 5255 break; 5256 case DIF_OP_NOP: 5257 break; 5258 case DIF_OP_SETX: 5259 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5260 break; 5261 case DIF_OP_SETS: 5262 regs[rd] = (uint64_t)(uintptr_t) 5263 (strtab + DIF_INSTR_STRING(instr)); 5264 break; 5265 case DIF_OP_SCMP: { 5266 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5267 uintptr_t s1 = regs[r1]; 5268 uintptr_t s2 = regs[r2]; 5269 5270 if (s1 != 0 && 5271 !dtrace_strcanload(s1, sz, mstate, vstate)) 5272 break; 5273 if (s2 != 0 && 5274 !dtrace_strcanload(s2, sz, mstate, vstate)) 5275 break; 5276 5277 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5278 5279 cc_n = cc_r < 0; 5280 cc_z = cc_r == 0; 5281 cc_v = cc_c = 0; 5282 break; 5283 } 5284 case DIF_OP_LDGA: 5285 regs[rd] = dtrace_dif_variable(mstate, state, 5286 r1, regs[r2]); 5287 break; 5288 case DIF_OP_LDGS: 5289 id = DIF_INSTR_VAR(instr); 5290 5291 if (id >= DIF_VAR_OTHER_UBASE) { 5292 uintptr_t a; 5293 5294 id -= DIF_VAR_OTHER_UBASE; 5295 svar = vstate->dtvs_globals[id]; 5296 ASSERT(svar != NULL); 5297 v = &svar->dtsv_var; 5298 5299 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5300 regs[rd] = svar->dtsv_data; 5301 break; 5302 } 5303 5304 a = (uintptr_t)svar->dtsv_data; 5305 5306 if (*(uint8_t *)a == UINT8_MAX) { 5307 /* 5308 * If the 0th byte is set to UINT8_MAX 5309 * then this is to be treated as a 5310 * reference to a NULL variable. 
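 * (By-ref statics carry one leading uint64_t of metadata ahead of
 * the payload -- hence the "a + sizeof (uint64_t)" below; the STGS
 * case writes UINT8_MAX into that leading byte to record that the
 * variable was assigned NULL.)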
5311 */ 5312 regs[rd] = 0; 5313 } else { 5314 regs[rd] = a + sizeof (uint64_t); 5315 } 5316 5317 break; 5318 } 5319 5320 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5321 break; 5322 5323 case DIF_OP_STGS: 5324 id = DIF_INSTR_VAR(instr); 5325 5326 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5327 id -= DIF_VAR_OTHER_UBASE; 5328 5329 svar = vstate->dtvs_globals[id]; 5330 ASSERT(svar != NULL); 5331 v = &svar->dtsv_var; 5332 5333 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5334 uintptr_t a = (uintptr_t)svar->dtsv_data; 5335 5336 ASSERT(a != 0); 5337 ASSERT(svar->dtsv_size != 0); 5338 5339 if (regs[rd] == 0) { 5340 *(uint8_t *)a = UINT8_MAX; 5341 break; 5342 } else { 5343 *(uint8_t *)a = 0; 5344 a += sizeof (uint64_t); 5345 } 5346 if (!dtrace_vcanload( 5347 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5348 mstate, vstate)) 5349 break; 5350 5351 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5352 (void *)a, &v->dtdv_type); 5353 break; 5354 } 5355 5356 svar->dtsv_data = regs[rd]; 5357 break; 5358 5359 case DIF_OP_LDTA: 5360 /* 5361 * There are no DTrace built-in thread-local arrays at 5362 * present. This opcode is saved for future work. 5363 */ 5364 *flags |= CPU_DTRACE_ILLOP; 5365 regs[rd] = 0; 5366 break; 5367 5368 case DIF_OP_LDLS: 5369 id = DIF_INSTR_VAR(instr); 5370 5371 if (id < DIF_VAR_OTHER_UBASE) { 5372 /* 5373 * For now, this has no meaning. 5374 */ 5375 regs[rd] = 0; 5376 break; 5377 } 5378 5379 id -= DIF_VAR_OTHER_UBASE; 5380 5381 ASSERT(id < vstate->dtvs_nlocals); 5382 ASSERT(vstate->dtvs_locals != NULL); 5383 5384 svar = vstate->dtvs_locals[id]; 5385 ASSERT(svar != NULL); 5386 v = &svar->dtsv_var; 5387 5388 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5389 uintptr_t a = (uintptr_t)svar->dtsv_data; 5390 size_t sz = v->dtdv_type.dtdt_size; 5391 5392 sz += sizeof (uint64_t); 5393 ASSERT(svar->dtsv_size == NCPU * sz); 5394 a += curcpu * sz; 5395 5396 if (*(uint8_t *)a == UINT8_MAX) { 5397 /* 5398 * If the 0th byte is set to UINT8_MAX 5399 * then this is to be treated as a 5400 * reference to a NULL variable. 
5401 */ 5402 regs[rd] = 0; 5403 } else { 5404 regs[rd] = a + sizeof (uint64_t); 5405 } 5406 5407 break; 5408 } 5409 5410 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5411 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5412 regs[rd] = tmp[curcpu]; 5413 break; 5414 5415 case DIF_OP_STLS: 5416 id = DIF_INSTR_VAR(instr); 5417 5418 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5419 id -= DIF_VAR_OTHER_UBASE; 5420 ASSERT(id < vstate->dtvs_nlocals); 5421 5422 ASSERT(vstate->dtvs_locals != NULL); 5423 svar = vstate->dtvs_locals[id]; 5424 ASSERT(svar != NULL); 5425 v = &svar->dtsv_var; 5426 5427 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5428 uintptr_t a = (uintptr_t)svar->dtsv_data; 5429 size_t sz = v->dtdv_type.dtdt_size; 5430 5431 sz += sizeof (uint64_t); 5432 ASSERT(svar->dtsv_size == NCPU * sz); 5433 a += curcpu * sz; 5434 5435 if (regs[rd] == 0) { 5436 *(uint8_t *)a = UINT8_MAX; 5437 break; 5438 } else { 5439 *(uint8_t *)a = 0; 5440 a += sizeof (uint64_t); 5441 } 5442 5443 if (!dtrace_vcanload( 5444 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5445 mstate, vstate)) 5446 break; 5447 5448 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5449 (void *)a, &v->dtdv_type); 5450 break; 5451 } 5452 5453 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5454 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5455 tmp[curcpu] = regs[rd]; 5456 break; 5457 5458 case DIF_OP_LDTS: { 5459 dtrace_dynvar_t *dvar; 5460 dtrace_key_t *key; 5461 5462 id = DIF_INSTR_VAR(instr); 5463 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5464 id -= DIF_VAR_OTHER_UBASE; 5465 v = &vstate->dtvs_tlocals[id]; 5466 5467 key = &tupregs[DIF_DTR_NREGS]; 5468 key[0].dttk_value = (uint64_t)id; 5469 key[0].dttk_size = 0; 5470 DTRACE_TLS_THRKEY(key[1].dttk_value); 5471 key[1].dttk_size = 0; 5472 5473 dvar = dtrace_dynvar(dstate, 2, key, 5474 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5475 mstate, vstate); 5476 5477 if (dvar == NULL) { 5478 regs[rd] = 0; 5479 break; 5480 } 5481 5482 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5483 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5484 } else { 5485 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5486 } 5487 5488 break; 5489 } 5490 5491 case DIF_OP_STTS: { 5492 dtrace_dynvar_t *dvar; 5493 dtrace_key_t *key; 5494 5495 id = DIF_INSTR_VAR(instr); 5496 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5497 id -= DIF_VAR_OTHER_UBASE; 5498 5499 key = &tupregs[DIF_DTR_NREGS]; 5500 key[0].dttk_value = (uint64_t)id; 5501 key[0].dttk_size = 0; 5502 DTRACE_TLS_THRKEY(key[1].dttk_value); 5503 key[1].dttk_size = 0; 5504 v = &vstate->dtvs_tlocals[id]; 5505 5506 dvar = dtrace_dynvar(dstate, 2, key, 5507 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5508 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5509 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5510 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5511 5512 /* 5513 * Given that we're storing to thread-local data, 5514 * we need to flush our predicate cache. 
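 * (dtrace_probe() consults t_predcache to skip probes whose
 * predicate has already evaluated to false for this thread; a store
 * to a thread-local variable may invalidate that cached result.)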
5515 */ 5516 curthread->t_predcache = 0; 5517 5518 if (dvar == NULL) 5519 break; 5520 5521 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5522 if (!dtrace_vcanload( 5523 (void *)(uintptr_t)regs[rd], 5524 &v->dtdv_type, mstate, vstate)) 5525 break; 5526 5527 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5528 dvar->dtdv_data, &v->dtdv_type); 5529 } else { 5530 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5531 } 5532 5533 break; 5534 } 5535 5536 case DIF_OP_SRA: 5537 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5538 break; 5539 5540 case DIF_OP_CALL: 5541 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5542 regs, tupregs, ttop, mstate, state); 5543 break; 5544 5545 case DIF_OP_PUSHTR: 5546 if (ttop == DIF_DTR_NREGS) { 5547 *flags |= CPU_DTRACE_TUPOFLOW; 5548 break; 5549 } 5550 5551 if (r1 == DIF_TYPE_STRING) { 5552 /* 5553 * If this is a string type and the size is 0, 5554 * we'll use the system-wide default string 5555 * size. Note that we are _not_ looking at 5556 * the value of the DTRACEOPT_STRSIZE option; 5557 * had this been set, we would expect to have 5558 * a non-zero size value in the "pushtr". 5559 */ 5560 tupregs[ttop].dttk_size = 5561 dtrace_strlen((char *)(uintptr_t)regs[rd], 5562 regs[r2] ? regs[r2] : 5563 dtrace_strsize_default) + 1; 5564 } else { 5565 tupregs[ttop].dttk_size = regs[r2]; 5566 } 5567 5568 tupregs[ttop++].dttk_value = regs[rd]; 5569 break; 5570 5571 case DIF_OP_PUSHTV: 5572 if (ttop == DIF_DTR_NREGS) { 5573 *flags |= CPU_DTRACE_TUPOFLOW; 5574 break; 5575 } 5576 5577 tupregs[ttop].dttk_value = regs[rd]; 5578 tupregs[ttop++].dttk_size = 0; 5579 break; 5580 5581 case DIF_OP_POPTS: 5582 if (ttop != 0) 5583 ttop--; 5584 break; 5585 5586 case DIF_OP_FLUSHTS: 5587 ttop = 0; 5588 break; 5589 5590 case DIF_OP_LDGAA: 5591 case DIF_OP_LDTAA: { 5592 dtrace_dynvar_t *dvar; 5593 dtrace_key_t *key = tupregs; 5594 uint_t nkeys = ttop; 5595 5596 id = DIF_INSTR_VAR(instr); 5597 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5598 id -= DIF_VAR_OTHER_UBASE; 5599 5600 key[nkeys].dttk_value = (uint64_t)id; 5601 key[nkeys++].dttk_size = 0; 5602 5603 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5604 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5605 key[nkeys++].dttk_size = 0; 5606 v = &vstate->dtvs_tlocals[id]; 5607 } else { 5608 v = &vstate->dtvs_globals[id]->dtsv_var; 5609 } 5610 5611 dvar = dtrace_dynvar(dstate, nkeys, key, 5612 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5613 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5614 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5615 5616 if (dvar == NULL) { 5617 regs[rd] = 0; 5618 break; 5619 } 5620 5621 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5622 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5623 } else { 5624 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5625 } 5626 5627 break; 5628 } 5629 5630 case DIF_OP_STGAA: 5631 case DIF_OP_STTAA: { 5632 dtrace_dynvar_t *dvar; 5633 dtrace_key_t *key = tupregs; 5634 uint_t nkeys = ttop; 5635 5636 id = DIF_INSTR_VAR(instr); 5637 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5638 id -= DIF_VAR_OTHER_UBASE; 5639 5640 key[nkeys].dttk_value = (uint64_t)id; 5641 key[nkeys++].dttk_size = 0; 5642 5643 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5644 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5645 key[nkeys++].dttk_size = 0; 5646 v = &vstate->dtvs_tlocals[id]; 5647 } else { 5648 v = &vstate->dtvs_globals[id]->dtsv_var; 5649 } 5650 5651 dvar = dtrace_dynvar(dstate, nkeys, key, 5652 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5653 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5654 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5655 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5656 5657 if (dvar == NULL) 5658 break; 5659 5660 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5661 if (!dtrace_vcanload( 5662 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5663 mstate, vstate)) 5664 break; 5665 5666 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5667 dvar->dtdv_data, &v->dtdv_type); 5668 } else { 5669 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5670 } 5671 5672 break; 5673 } 5674 5675 case DIF_OP_ALLOCS: { 5676 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5677 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5678 5679 /* 5680 * Rounding up the user allocation size could have 5681 * overflowed large, bogus allocations (like -1ULL) to 5682 * 0. 5683 */ 5684 if (size < regs[r1] || 5685 !DTRACE_INSCRATCH(mstate, size)) { 5686 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5687 regs[rd] = 0; 5688 break; 5689 } 5690 5691 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5692 mstate->dtms_scratch_ptr += size; 5693 regs[rd] = ptr; 5694 break; 5695 } 5696 5697 case DIF_OP_COPYS: 5698 if (!dtrace_canstore(regs[rd], regs[r2], 5699 mstate, vstate)) { 5700 *flags |= CPU_DTRACE_BADADDR; 5701 *illval = regs[rd]; 5702 break; 5703 } 5704 5705 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5706 break; 5707 5708 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5709 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5710 break; 5711 5712 case DIF_OP_STB: 5713 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5714 *flags |= CPU_DTRACE_BADADDR; 5715 *illval = regs[rd]; 5716 break; 5717 } 5718 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5719 break; 5720 5721 case DIF_OP_STH: 5722 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5723 *flags |= CPU_DTRACE_BADADDR; 5724 *illval = regs[rd]; 5725 break; 5726 } 5727 if (regs[rd] & 1) { 5728 *flags |= CPU_DTRACE_BADALIGN; 5729 *illval = regs[rd]; 5730 break; 5731 } 5732 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5733 break; 5734 5735 case DIF_OP_STW: 5736 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5737 *flags |= CPU_DTRACE_BADADDR; 5738 *illval = regs[rd]; 5739 break; 5740 } 5741 if (regs[rd] & 3) { 5742 *flags |= CPU_DTRACE_BADALIGN; 5743 *illval = regs[rd]; 5744 break; 5745 } 5746 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5747 break; 5748 5749 case DIF_OP_STX: 5750 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5751 *flags |= CPU_DTRACE_BADADDR; 5752 *illval = regs[rd]; 5753 break; 5754 } 5755 if (regs[rd] & 7) { 5756 *flags |= CPU_DTRACE_BADALIGN; 5757 *illval = regs[rd]; 5758 break; 5759 } 5760 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5761 break; 5762 } 5763 } 5764 5765 if (!(*flags & CPU_DTRACE_FAULT)) 5766 return (rval); 5767 5768 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5769 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5770 5771 return (0); 5772 } 5773 5774 static void 5775 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5776 { 5777 dtrace_probe_t *probe = ecb->dte_probe; 5778 dtrace_provider_t *prov = probe->dtpr_provider; 5779 char c[DTRACE_FULLNAMELEN + 80], *str; 5780 char *msg = "dtrace: breakpoint action at probe "; 5781 char *ecbmsg = " (ecb "; 5782 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5783 uintptr_t val = (uintptr_t)ecb; 5784 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5785 5786 if (dtrace_destructive_disallow) 5787 return; 5788 5789 /* 5790 * It's impossible to be taking action on the NULL probe. 
5791 */ 5792 ASSERT(probe != NULL); 5793 5794 /* 5795 * This is a poor man's (destitute man's?) sprintf(): we want to 5796 * print the provider name, module name, function name and name of 5797 * the probe, along with the hex address of the ECB with the breakpoint 5798 * action -- all of which we must place in the character buffer by 5799 * hand. 5800 */ 5801 while (*msg != '\0') 5802 c[i++] = *msg++; 5803 5804 for (str = prov->dtpv_name; *str != '\0'; str++) 5805 c[i++] = *str; 5806 c[i++] = ':'; 5807 5808 for (str = probe->dtpr_mod; *str != '\0'; str++) 5809 c[i++] = *str; 5810 c[i++] = ':'; 5811 5812 for (str = probe->dtpr_func; *str != '\0'; str++) 5813 c[i++] = *str; 5814 c[i++] = ':'; 5815 5816 for (str = probe->dtpr_name; *str != '\0'; str++) 5817 c[i++] = *str; 5818 5819 while (*ecbmsg != '\0') 5820 c[i++] = *ecbmsg++; 5821 5822 while (shift >= 0) { 5823 mask = (uintptr_t)0xf << shift; 5824 5825 if (val >= ((uintptr_t)1 << shift)) 5826 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5827 shift -= 4; 5828 } 5829 5830 c[i++] = ')'; 5831 c[i] = '\0'; 5832 5833 #if defined(sun) 5834 debug_enter(c); 5835 #else 5836 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5837 #endif 5838 } 5839 5840 static void 5841 dtrace_action_panic(dtrace_ecb_t *ecb) 5842 { 5843 dtrace_probe_t *probe = ecb->dte_probe; 5844 5845 /* 5846 * It's impossible to be taking action on the NULL probe. 5847 */ 5848 ASSERT(probe != NULL); 5849 5850 if (dtrace_destructive_disallow) 5851 return; 5852 5853 if (dtrace_panicked != NULL) 5854 return; 5855 5856 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5857 return; 5858 5859 /* 5860 * We won the right to panic. (We want to be sure that only one 5861 * thread calls panic() from dtrace_probe(), and that panic() is 5862 * called exactly once.) 5863 */ 5864 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5865 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5866 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5867 } 5868 5869 static void 5870 dtrace_action_raise(uint64_t sig) 5871 { 5872 if (dtrace_destructive_disallow) 5873 return; 5874 5875 if (sig >= NSIG) { 5876 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5877 return; 5878 } 5879 5880 #if defined(sun) 5881 /* 5882 * raise() has a queue depth of 1 -- we ignore all subsequent 5883 * invocations of the raise() action. 
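 * (On Solaris the signal is posted later, from the AST path on the
 * way back to user-mode -- hence t_sig_check and aston(); the
 * FreeBSD case below posts the signal directly with kern_psignal().)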
5884 */ 5885 if (curthread->t_dtrace_sig == 0) 5886 curthread->t_dtrace_sig = (uint8_t)sig; 5887 5888 curthread->t_sig_check = 1; 5889 aston(curthread); 5890 #else 5891 struct proc *p = curproc; 5892 PROC_LOCK(p); 5893 kern_psignal(p, sig); 5894 PROC_UNLOCK(p); 5895 #endif 5896 } 5897 5898 static void 5899 dtrace_action_stop(void) 5900 { 5901 if (dtrace_destructive_disallow) 5902 return; 5903 5904 #if defined(sun) 5905 if (!curthread->t_dtrace_stop) { 5906 curthread->t_dtrace_stop = 1; 5907 curthread->t_sig_check = 1; 5908 aston(curthread); 5909 } 5910 #else 5911 struct proc *p = curproc; 5912 PROC_LOCK(p); 5913 kern_psignal(p, SIGSTOP); 5914 PROC_UNLOCK(p); 5915 #endif 5916 } 5917 5918 static void 5919 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5920 { 5921 hrtime_t now; 5922 volatile uint16_t *flags; 5923 #if defined(sun) 5924 cpu_t *cpu = CPU; 5925 #else 5926 cpu_t *cpu = &solaris_cpu[curcpu]; 5927 #endif 5928 5929 if (dtrace_destructive_disallow) 5930 return; 5931 5932 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5933 5934 now = dtrace_gethrtime(); 5935 5936 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5937 /* 5938 * We need to advance the mark to the current time. 5939 */ 5940 cpu->cpu_dtrace_chillmark = now; 5941 cpu->cpu_dtrace_chilled = 0; 5942 } 5943 5944 /* 5945 * Now check to see if the requested chill time would take us over 5946 * the maximum amount of time allowed in the chill interval. (Or 5947 * worse, if the calculation itself induces overflow.) 5948 */ 5949 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5950 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5951 *flags |= CPU_DTRACE_ILLOP; 5952 return; 5953 } 5954 5955 while (dtrace_gethrtime() - now < val) 5956 continue; 5957 5958 /* 5959 * Normally, we assure that the value of the variable "timestamp" does 5960 * not change within an ECB. The presence of chill() represents an 5961 * exception to this rule, however. 5962 */ 5963 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5964 cpu->cpu_dtrace_chilled += val; 5965 } 5966 5967 static void 5968 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5969 uint64_t *buf, uint64_t arg) 5970 { 5971 int nframes = DTRACE_USTACK_NFRAMES(arg); 5972 int strsize = DTRACE_USTACK_STRSIZE(arg); 5973 uint64_t *pcs = &buf[1], *fps; 5974 char *str = (char *)&pcs[nframes]; 5975 int size, offs = 0, i, j; 5976 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5977 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5978 char *sym; 5979 5980 /* 5981 * Should be taking a faster path if string space has not been 5982 * allocated. 5983 */ 5984 ASSERT(strsize != 0); 5985 5986 /* 5987 * We will first allocate some temporary space for the frame pointers. 5988 */ 5989 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5990 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5991 (nframes * sizeof (uint64_t)); 5992 5993 if (!DTRACE_INSCRATCH(mstate, size)) { 5994 /* 5995 * Not enough room for our frame pointers -- need to indicate 5996 * that we ran out of scratch space. 5997 */ 5998 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5999 return; 6000 } 6001 6002 mstate->dtms_scratch_ptr += size; 6003 saved = mstate->dtms_scratch_ptr; 6004 6005 /* 6006 * Now get a stack with both program counters and frame pointers. 
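 * (dtrace_getufpstack() is asked for nframes + 1 entries because the
 * first slot of 'buf' is reserved for metadata rather than for a
 * returned program counter -- note that pcs starts at &buf[1]; the
 * frame pointers themselves live only in the scratch space reserved
 * above.)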
6007 */ 6008 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6009 dtrace_getufpstack(buf, fps, nframes + 1); 6010 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6011 6012 /* 6013 * If that faulted, we're cooked. 6014 */ 6015 if (*flags & CPU_DTRACE_FAULT) 6016 goto out; 6017 6018 /* 6019 * Now we want to walk up the stack, calling the USTACK helper. For 6020 * each iteration, we restore the scratch pointer. 6021 */ 6022 for (i = 0; i < nframes; i++) { 6023 mstate->dtms_scratch_ptr = saved; 6024 6025 if (offs >= strsize) 6026 break; 6027 6028 sym = (char *)(uintptr_t)dtrace_helper( 6029 DTRACE_HELPER_ACTION_USTACK, 6030 mstate, state, pcs[i], fps[i]); 6031 6032 /* 6033 * If we faulted while running the helper, we're going to 6034 * clear the fault and null out the corresponding string. 6035 */ 6036 if (*flags & CPU_DTRACE_FAULT) { 6037 *flags &= ~CPU_DTRACE_FAULT; 6038 str[offs++] = '\0'; 6039 continue; 6040 } 6041 6042 if (sym == NULL) { 6043 str[offs++] = '\0'; 6044 continue; 6045 } 6046 6047 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6048 6049 /* 6050 * Now copy in the string that the helper returned to us. 6051 */ 6052 for (j = 0; offs + j < strsize; j++) { 6053 if ((str[offs + j] = sym[j]) == '\0') 6054 break; 6055 } 6056 6057 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6058 6059 offs += j + 1; 6060 } 6061 6062 if (offs >= strsize) { 6063 /* 6064 * If we didn't have room for all of the strings, we don't 6065 * abort processing -- this needn't be a fatal error -- but we 6066 * still want to increment a counter (dts_stkstroverflows) to 6067 * allow this condition to be warned about. (If this is from 6068 * a jstack() action, it is easily tuned via jstackstrsize.) 6069 */ 6070 dtrace_error(&state->dts_stkstroverflows); 6071 } 6072 6073 while (offs < strsize) 6074 str[offs++] = '\0'; 6075 6076 out: 6077 mstate->dtms_scratch_ptr = old; 6078 } 6079 6080 /* 6081 * If you're looking for the epicenter of DTrace, you just found it. This 6082 * is the function called by the provider to fire a probe -- from which all 6083 * subsequent probe-context DTrace activity emanates. 6084 */ 6085 void 6086 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6087 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6088 { 6089 processorid_t cpuid; 6090 dtrace_icookie_t cookie; 6091 dtrace_probe_t *probe; 6092 dtrace_mstate_t mstate; 6093 dtrace_ecb_t *ecb; 6094 dtrace_action_t *act; 6095 intptr_t offs; 6096 size_t size; 6097 int vtime, onintr; 6098 volatile uint16_t *flags; 6099 hrtime_t now; 6100 6101 if (panicstr != NULL) 6102 return; 6103 6104 #if defined(sun) 6105 /* 6106 * Kick out immediately if this CPU is still being born (in which case 6107 * curthread will be set to -1) or the current thread can't allow 6108 * probes in its current context. 6109 */ 6110 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6111 return; 6112 #endif 6113 6114 cookie = dtrace_interrupt_disable(); 6115 probe = dtrace_probes[id - 1]; 6116 cpuid = curcpu; 6117 onintr = CPU_ON_INTR(CPU); 6118 6119 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6120 probe->dtpr_predcache == curthread->t_predcache) { 6121 /* 6122 * We have hit in the predicate cache; we know that 6123 * this predicate would evaluate to be false. 6124 */ 6125 dtrace_interrupt_enable(cookie); 6126 return; 6127 } 6128 6129 #if defined(sun) 6130 if (panic_quiesce) { 6131 #else 6132 if (panicstr != NULL) { 6133 #endif 6134 /* 6135 * We don't trace anything if we're panicking. 
6136 */ 6137 dtrace_interrupt_enable(cookie); 6138 return; 6139 } 6140 6141 now = dtrace_gethrtime(); 6142 vtime = dtrace_vtime_references != 0; 6143 6144 if (vtime && curthread->t_dtrace_start) 6145 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6146 6147 mstate.dtms_difo = NULL; 6148 mstate.dtms_probe = probe; 6149 mstate.dtms_strtok = 0; 6150 mstate.dtms_arg[0] = arg0; 6151 mstate.dtms_arg[1] = arg1; 6152 mstate.dtms_arg[2] = arg2; 6153 mstate.dtms_arg[3] = arg3; 6154 mstate.dtms_arg[4] = arg4; 6155 6156 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6157 6158 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6159 dtrace_predicate_t *pred = ecb->dte_predicate; 6160 dtrace_state_t *state = ecb->dte_state; 6161 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6162 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6163 dtrace_vstate_t *vstate = &state->dts_vstate; 6164 dtrace_provider_t *prov = probe->dtpr_provider; 6165 uint64_t tracememsize = 0; 6166 int committed = 0; 6167 caddr_t tomax; 6168 6169 /* 6170 * A little subtlety with the following (seemingly innocuous) 6171 * declaration of the automatic 'val': by looking at the 6172 * code, you might think that it could be declared in the 6173 * action processing loop, below. (That is, it's only used in 6174 * the action processing loop.) However, it must be declared 6175 * out of that scope because in the case of DIF expression 6176 * arguments to aggregating actions, one iteration of the 6177 * action loop will use the last iteration's value. 6178 */ 6179 uint64_t val = 0; 6180 6181 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6182 *flags &= ~CPU_DTRACE_ERROR; 6183 6184 if (prov == dtrace_provider) { 6185 /* 6186 * If dtrace itself is the provider of this probe, 6187 * we're only going to continue processing the ECB if 6188 * arg0 (the dtrace_state_t) is equal to the ECB's 6189 * creating state. (This prevents disjoint consumers 6190 * from seeing one another's metaprobes.) 6191 */ 6192 if (arg0 != (uint64_t)(uintptr_t)state) 6193 continue; 6194 } 6195 6196 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6197 /* 6198 * We're not currently active. If our provider isn't 6199 * the dtrace pseudo provider, we're not interested. 6200 */ 6201 if (prov != dtrace_provider) 6202 continue; 6203 6204 /* 6205 * Now we must further check if we are in the BEGIN 6206 * probe. If we are, we will only continue processing 6207 * if we're still in WARMUP -- if one BEGIN enabling 6208 * has invoked the exit() action, we don't want to 6209 * evaluate subsequent BEGIN enablings. 6210 */ 6211 if (probe->dtpr_id == dtrace_probeid_begin && 6212 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6213 ASSERT(state->dts_activity == 6214 DTRACE_ACTIVITY_DRAINING); 6215 continue; 6216 } 6217 } 6218 6219 if (ecb->dte_cond) { 6220 /* 6221 * If the dte_cond bits indicate that this 6222 * consumer is only allowed to see user-mode firings 6223 * of this probe, call the provider's dtps_usermode() 6224 * entry point to check that the probe was fired 6225 * while in a user context. Skip this ECB if that's 6226 * not the case. 6227 */ 6228 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6229 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6230 probe->dtpr_id, probe->dtpr_arg) == 0) 6231 continue; 6232 6233 #if defined(sun) 6234 /* 6235 * This is more subtle than it looks. 
We have to be
6236 * absolutely certain that CRED() isn't going to
6237 * change out from under us so it's only legit to
6238 * examine that structure if we're in constrained
6239 * situations. Currently, the only time we'll do this
6240 * check is if a non-super-user has enabled the
6241 * profile or syscall providers -- providers that
6242 * allow visibility of all processes. For the
6243 * profile case, the check above will ensure that
6244 * we're examining a user context.
6245 */
6246 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6247 cred_t *cr;
6248 cred_t *s_cr =
6249 ecb->dte_state->dts_cred.dcr_cred;
6250 proc_t *proc;
6251
6252 ASSERT(s_cr != NULL);
6253
6254 if ((cr = CRED()) == NULL ||
6255 s_cr->cr_uid != cr->cr_uid ||
6256 s_cr->cr_uid != cr->cr_ruid ||
6257 s_cr->cr_uid != cr->cr_suid ||
6258 s_cr->cr_gid != cr->cr_gid ||
6259 s_cr->cr_gid != cr->cr_rgid ||
6260 s_cr->cr_gid != cr->cr_sgid ||
6261 (proc = ttoproc(curthread)) == NULL ||
6262 (proc->p_flag & SNOCD))
6263 continue;
6264 }
6265
6266 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6267 cred_t *cr;
6268 cred_t *s_cr =
6269 ecb->dte_state->dts_cred.dcr_cred;
6270
6271 ASSERT(s_cr != NULL);
6272
6273 if ((cr = CRED()) == NULL ||
6274 s_cr->cr_zone->zone_id !=
6275 cr->cr_zone->zone_id)
6276 continue;
6277 }
6278 #endif
6279 }
6280
6281 if (now - state->dts_alive > dtrace_deadman_timeout) {
6282 /*
6283 * We seem to be dead. Unless we (a) have kernel
6284 * destructive permissions (b) have explicitly enabled
6285 * destructive actions and (c) destructive actions have
6286 * not been disabled, we're going to transition into
6287 * the KILLED state, from which no further processing
6288 * on this state will be performed.
6289 */
6290 if (!dtrace_priv_kernel_destructive(state) ||
6291 !state->dts_cred.dcr_destructive ||
6292 dtrace_destructive_disallow) {
6293 void *activity = &state->dts_activity;
6294 dtrace_activity_t current;
6295
6296 do {
6297 current = state->dts_activity;
6298 } while (dtrace_cas32(activity, current,
6299 DTRACE_ACTIVITY_KILLED) != current);
6300
6301 continue;
6302 }
6303 }
6304
6305 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6306 ecb->dte_alignment, state, &mstate)) < 0)
6307 continue;
6308
6309 tomax = buf->dtb_tomax;
6310 ASSERT(tomax != NULL);
6311
6312 if (ecb->dte_size != 0) {
6313 dtrace_rechdr_t dtrh;
6314 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
6315 mstate.dtms_timestamp = dtrace_gethrtime();
6316 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6317 }
6318 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
6319 dtrh.dtrh_epid = ecb->dte_epid;
6320 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
6321 mstate.dtms_timestamp);
6322 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
6323 }
6324
6325 mstate.dtms_epid = ecb->dte_epid;
6326 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6327
6328 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6329 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6330 else
6331 mstate.dtms_access = 0;
6332
6333 if (pred != NULL) {
6334 dtrace_difo_t *dp = pred->dtp_difo;
6335 int rval;
6336
6337 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6338
6339 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6340 dtrace_cacheid_t cid = probe->dtpr_predcache;
6341
6342 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6343 /*
6344 * Update the predicate cache...
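 *
 * As a worked example (hypothetical, for illustration only): for
 * an enabling such as
 *
 *	syscall:::entry /pid == 1234/ { ... }
 *
 * the predicate depends only on state that cannot change for a
 * given thread. Once it evaluates to false on some thread,
 * caching its identifier in t_predcache lets the cheap
 * comparison at the top of dtrace_probe() reject subsequent
 * firings on that thread without emulating the DIF at all.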
6345 */ 6346 ASSERT(cid == pred->dtp_cacheid); 6347 curthread->t_predcache = cid; 6348 } 6349 6350 continue; 6351 } 6352 } 6353 6354 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6355 act != NULL; act = act->dta_next) { 6356 size_t valoffs; 6357 dtrace_difo_t *dp; 6358 dtrace_recdesc_t *rec = &act->dta_rec; 6359 6360 size = rec->dtrd_size; 6361 valoffs = offs + rec->dtrd_offset; 6362 6363 if (DTRACEACT_ISAGG(act->dta_kind)) { 6364 uint64_t v = 0xbad; 6365 dtrace_aggregation_t *agg; 6366 6367 agg = (dtrace_aggregation_t *)act; 6368 6369 if ((dp = act->dta_difo) != NULL) 6370 v = dtrace_dif_emulate(dp, 6371 &mstate, vstate, state); 6372 6373 if (*flags & CPU_DTRACE_ERROR) 6374 continue; 6375 6376 /* 6377 * Note that we always pass the expression 6378 * value from the previous iteration of the 6379 * action loop. This value will only be used 6380 * if there is an expression argument to the 6381 * aggregating action, denoted by the 6382 * dtag_hasarg field. 6383 */ 6384 dtrace_aggregate(agg, buf, 6385 offs, aggbuf, v, val); 6386 continue; 6387 } 6388 6389 switch (act->dta_kind) { 6390 case DTRACEACT_STOP: 6391 if (dtrace_priv_proc_destructive(state)) 6392 dtrace_action_stop(); 6393 continue; 6394 6395 case DTRACEACT_BREAKPOINT: 6396 if (dtrace_priv_kernel_destructive(state)) 6397 dtrace_action_breakpoint(ecb); 6398 continue; 6399 6400 case DTRACEACT_PANIC: 6401 if (dtrace_priv_kernel_destructive(state)) 6402 dtrace_action_panic(ecb); 6403 continue; 6404 6405 case DTRACEACT_STACK: 6406 if (!dtrace_priv_kernel(state)) 6407 continue; 6408 6409 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6410 size / sizeof (pc_t), probe->dtpr_aframes, 6411 DTRACE_ANCHORED(probe) ? NULL : 6412 (uint32_t *)arg0); 6413 continue; 6414 6415 case DTRACEACT_JSTACK: 6416 case DTRACEACT_USTACK: 6417 if (!dtrace_priv_proc(state)) 6418 continue; 6419 6420 /* 6421 * See comment in DIF_VAR_PID. 6422 */ 6423 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6424 CPU_ON_INTR(CPU)) { 6425 int depth = DTRACE_USTACK_NFRAMES( 6426 rec->dtrd_arg) + 1; 6427 6428 dtrace_bzero((void *)(tomax + valoffs), 6429 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6430 + depth * sizeof (uint64_t)); 6431 6432 continue; 6433 } 6434 6435 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6436 curproc->p_dtrace_helpers != NULL) { 6437 /* 6438 * This is the slow path -- we have 6439 * allocated string space, and we're 6440 * getting the stack of a process that 6441 * has helpers. Call into a separate 6442 * routine to perform this processing. 
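 *
 * (For example -- purely illustrative -- a VM
 * runtime that has registered a jstack() helper
 * might map a program counter in JIT-compiled code
 * to a string like "org/example/Foo.bar()V"; such
 * strings are what consume the string space
 * accounted for here.)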
6443 */
6444 dtrace_action_ustack(&mstate, state,
6445 (uint64_t *)(tomax + valoffs),
6446 rec->dtrd_arg);
6447 continue;
6448 }
6449
6450 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6451 dtrace_getupcstack((uint64_t *)
6452 (tomax + valoffs),
6453 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6454 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6455 continue;
6456
6457 default:
6458 break;
6459 }
6460
6461 dp = act->dta_difo;
6462 ASSERT(dp != NULL);
6463
6464 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6465
6466 if (*flags & CPU_DTRACE_ERROR)
6467 continue;
6468
6469 switch (act->dta_kind) {
6470 case DTRACEACT_SPECULATE: {
6471 dtrace_rechdr_t *dtrh;
6472
6473 ASSERT(buf == &state->dts_buffer[cpuid]);
6474 buf = dtrace_speculation_buffer(state,
6475 cpuid, val);
6476
6477 if (buf == NULL) {
6478 *flags |= CPU_DTRACE_DROP;
6479 continue;
6480 }
6481
6482 offs = dtrace_buffer_reserve(buf,
6483 ecb->dte_needed, ecb->dte_alignment,
6484 state, NULL);
6485
6486 if (offs < 0) {
6487 *flags |= CPU_DTRACE_DROP;
6488 continue;
6489 }
6490
6491 tomax = buf->dtb_tomax;
6492 ASSERT(tomax != NULL);
6493
6494 if (ecb->dte_size == 0)
6495 continue;
6496
6497 ASSERT3U(ecb->dte_size, >=,
6498 sizeof (dtrace_rechdr_t));
6499 dtrh = ((void *)(tomax + offs));
6500 dtrh->dtrh_epid = ecb->dte_epid;
6501 /*
6502 * When the speculation is committed, all of
6503 * the records in the speculative buffer will
6504 * have their timestamps set to the commit
6505 * time. Until then, it is set to a sentinel
6506 * value, for debuggability.
6507 */
6508 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
6509 continue;
6510 }
6511
6512 case DTRACEACT_PRINTM: {
6513 /* The DIF returns a 'memref'. */
6514 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6515
6516 /* Get the size from the memref. */
6517 size = memref[1];
6518
6519 /*
6520 * Check if the size exceeds the allocated
6521 * buffer size.
6522 */
6523 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6524 /* Flag a drop! */
6525 *flags |= CPU_DTRACE_DROP;
6526 continue;
6527 }
6528
6529 /* Store the size in the buffer first. */
6530 DTRACE_STORE(uintptr_t, tomax,
6531 valoffs, size);
6532
6533 /*
6534 * Offset the buffer address to the start
6535 * of the data.
6536 */
6537 valoffs += sizeof(uintptr_t);
6538
6539 /*
6540 * Reset to the memory address rather than
6541 * the memref array, then let the BYREF
6542 * code below do the work to store the
6543 * memory data in the buffer.
6544 */
6545 val = memref[0];
6546 break;
6547 }
6548
6549 case DTRACEACT_PRINTT: {
6550 /* The DIF returns a 'typeref'. */
6551 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6552 char c = '\0' + 1;
6553 size_t s;
6554
6555 /*
6556 * Get the type string length and round it
6557 * up so that the data that follows is
6558 * aligned for easy access.
6559 */
6560 size_t typs = strlen((char *) typeref[2]) + 1;
6561 typs = roundup(typs, sizeof(uintptr_t));
6562
6563 /*
6564 * Get the size from the typeref using the
6565 * number of elements and the type size.
6566 */
6567 size = typeref[1] * typeref[3];
6568
6569 /*
6570 * Check if the size exceeds the allocated
6571 * buffer size.
6572 */
6573 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6574 /* Flag a drop! */
6575 *flags |= CPU_DTRACE_DROP;
6576 continue;
6577 }
6578
6579 /* Store the size in the buffer first. */
6580 DTRACE_STORE(uintptr_t, tomax,
6581 valoffs, size);
6582 valoffs += sizeof(uintptr_t);
6583
6584 /* Store the type size in the buffer.
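 *
 * To summarize the printt() record assembled here (a sketch
 * reconstructed from the stores above and below, not a
 * separate format definition), the layout in the buffer is:
 *
 *	[ size ][ type size ][ type string, padded to typs ][ data ]
 *
 * where the first two cells are uintptr_t-sized.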
*/ 6585 DTRACE_STORE(uintptr_t, tomax, 6586 valoffs, typeref[3]); 6587 valoffs += sizeof(uintptr_t); 6588 6589 val = typeref[2]; 6590 6591 for (s = 0; s < typs; s++) { 6592 if (c != '\0') 6593 c = dtrace_load8(val++); 6594 6595 DTRACE_STORE(uint8_t, tomax, 6596 valoffs++, c); 6597 } 6598 6599 /* 6600 * Reset to the memory address rather than 6601 * the typeref array, then let the BYREF 6602 * code below do the work to store the 6603 * memory data in the buffer. 6604 */ 6605 val = typeref[0]; 6606 break; 6607 } 6608 6609 case DTRACEACT_CHILL: 6610 if (dtrace_priv_kernel_destructive(state)) 6611 dtrace_action_chill(&mstate, val); 6612 continue; 6613 6614 case DTRACEACT_RAISE: 6615 if (dtrace_priv_proc_destructive(state)) 6616 dtrace_action_raise(val); 6617 continue; 6618 6619 case DTRACEACT_COMMIT: 6620 ASSERT(!committed); 6621 6622 /* 6623 * We need to commit our buffer state. 6624 */ 6625 if (ecb->dte_size) 6626 buf->dtb_offset = offs + ecb->dte_size; 6627 buf = &state->dts_buffer[cpuid]; 6628 dtrace_speculation_commit(state, cpuid, val); 6629 committed = 1; 6630 continue; 6631 6632 case DTRACEACT_DISCARD: 6633 dtrace_speculation_discard(state, cpuid, val); 6634 continue; 6635 6636 case DTRACEACT_DIFEXPR: 6637 case DTRACEACT_LIBACT: 6638 case DTRACEACT_PRINTF: 6639 case DTRACEACT_PRINTA: 6640 case DTRACEACT_SYSTEM: 6641 case DTRACEACT_FREOPEN: 6642 case DTRACEACT_TRACEMEM: 6643 break; 6644 6645 case DTRACEACT_TRACEMEM_DYNSIZE: 6646 tracememsize = val; 6647 break; 6648 6649 case DTRACEACT_SYM: 6650 case DTRACEACT_MOD: 6651 if (!dtrace_priv_kernel(state)) 6652 continue; 6653 break; 6654 6655 case DTRACEACT_USYM: 6656 case DTRACEACT_UMOD: 6657 case DTRACEACT_UADDR: { 6658 #if defined(sun) 6659 struct pid *pid = curthread->t_procp->p_pidp; 6660 #endif 6661 6662 if (!dtrace_priv_proc(state)) 6663 continue; 6664 6665 DTRACE_STORE(uint64_t, tomax, 6666 #if defined(sun) 6667 valoffs, (uint64_t)pid->pid_id); 6668 #else 6669 valoffs, (uint64_t) curproc->p_pid); 6670 #endif 6671 DTRACE_STORE(uint64_t, tomax, 6672 valoffs + sizeof (uint64_t), val); 6673 6674 continue; 6675 } 6676 6677 case DTRACEACT_EXIT: { 6678 /* 6679 * For the exit action, we are going to attempt 6680 * to atomically set our activity to be 6681 * draining. If this fails (either because 6682 * another CPU has beat us to the exit action, 6683 * or because our current activity is something 6684 * other than ACTIVE or WARMUP), we will 6685 * continue. This assures that the exit action 6686 * can be successfully recorded at most once 6687 * when we're in the ACTIVE state. If we're 6688 * encountering the exit() action while in 6689 * COOLDOWN, however, we want to honor the new 6690 * status code. (We know that we're the only 6691 * thread in COOLDOWN, so there is no race.) 
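 *
 * Schematically (a summary of the transitions effected below,
 * not a new state machine): ACTIVE or WARMUP moves to DRAINING
 * on a successful exit(); a failed dtrace_cas32() means another
 * CPU beat us to it, and this record is dropped; COOLDOWN falls
 * through so that the new status code is still recorded.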
6692 */ 6693 void *activity = &state->dts_activity; 6694 dtrace_activity_t current = state->dts_activity; 6695 6696 if (current == DTRACE_ACTIVITY_COOLDOWN) 6697 break; 6698 6699 if (current != DTRACE_ACTIVITY_WARMUP) 6700 current = DTRACE_ACTIVITY_ACTIVE; 6701 6702 if (dtrace_cas32(activity, current, 6703 DTRACE_ACTIVITY_DRAINING) != current) { 6704 *flags |= CPU_DTRACE_DROP; 6705 continue; 6706 } 6707 6708 break; 6709 } 6710 6711 default: 6712 ASSERT(0); 6713 } 6714 6715 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6716 uintptr_t end = valoffs + size; 6717 6718 if (tracememsize != 0 && 6719 valoffs + tracememsize < end) { 6720 end = valoffs + tracememsize; 6721 tracememsize = 0; 6722 } 6723 6724 if (!dtrace_vcanload((void *)(uintptr_t)val, 6725 &dp->dtdo_rtype, &mstate, vstate)) 6726 continue; 6727 6728 /* 6729 * If this is a string, we're going to only 6730 * load until we find the zero byte -- after 6731 * which we'll store zero bytes. 6732 */ 6733 if (dp->dtdo_rtype.dtdt_kind == 6734 DIF_TYPE_STRING) { 6735 char c = '\0' + 1; 6736 int intuple = act->dta_intuple; 6737 size_t s; 6738 6739 for (s = 0; s < size; s++) { 6740 if (c != '\0') 6741 c = dtrace_load8(val++); 6742 6743 DTRACE_STORE(uint8_t, tomax, 6744 valoffs++, c); 6745 6746 if (c == '\0' && intuple) 6747 break; 6748 } 6749 6750 continue; 6751 } 6752 6753 while (valoffs < end) { 6754 DTRACE_STORE(uint8_t, tomax, valoffs++, 6755 dtrace_load8(val++)); 6756 } 6757 6758 continue; 6759 } 6760 6761 switch (size) { 6762 case 0: 6763 break; 6764 6765 case sizeof (uint8_t): 6766 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6767 break; 6768 case sizeof (uint16_t): 6769 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6770 break; 6771 case sizeof (uint32_t): 6772 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6773 break; 6774 case sizeof (uint64_t): 6775 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6776 break; 6777 default: 6778 /* 6779 * Any other size should have been returned by 6780 * reference, not by value. 6781 */ 6782 ASSERT(0); 6783 break; 6784 } 6785 } 6786 6787 if (*flags & CPU_DTRACE_DROP) 6788 continue; 6789 6790 if (*flags & CPU_DTRACE_FAULT) { 6791 int ndx; 6792 dtrace_action_t *err; 6793 6794 buf->dtb_errors++; 6795 6796 if (probe->dtpr_id == dtrace_probeid_error) { 6797 /* 6798 * There's nothing we can do -- we had an 6799 * error on the error probe. We bump an 6800 * error counter to at least indicate that 6801 * this condition happened. 6802 */ 6803 dtrace_error(&state->dts_dblerrors); 6804 continue; 6805 } 6806 6807 if (vtime) { 6808 /* 6809 * Before recursing on dtrace_probe(), we 6810 * need to explicitly clear out our start 6811 * time to prevent it from being accumulated 6812 * into t_dtrace_vtime. 6813 */ 6814 curthread->t_dtrace_start = 0; 6815 } 6816 6817 /* 6818 * Iterate over the actions to figure out which action 6819 * we were processing when we experienced the error. 6820 * Note that act points _past_ the faulting action; if 6821 * act is ecb->dte_action, the fault was in the 6822 * predicate, if it's ecb->dte_action->dta_next it's 6823 * in action #1, and so on. 6824 */ 6825 for (err = ecb->dte_action, ndx = 0; 6826 err != act; err = err->dta_next, ndx++) 6827 continue; 6828 6829 dtrace_probe_error(state, ecb->dte_epid, ndx, 6830 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6832 cpu_core[cpuid].cpuc_dtrace_illval);
6833
6834 continue;
6835 }
6836
6837 if (!committed)
6838 buf->dtb_offset = offs + ecb->dte_size;
6839 }
6840
6841 if (vtime)
6842 curthread->t_dtrace_start = dtrace_gethrtime();
6843
6844 dtrace_interrupt_enable(cookie);
6845 }
6846
6847 /*
6848 * DTrace Probe Hashing Functions
6849 *
6850 * The functions in this section (and indeed, the functions in the remaining
6851 * sections) are not _called_ from probe context. (Any exceptions to this are
6852 * marked with a "Note:".) Rather, they are called from elsewhere in the
6853 * DTrace framework to look up probes in, add probes to, and remove probes
6854 * from the DTrace probe hashes. (Each probe is hashed by each element of the
6855 * probe tuple -- allowing for fast lookups, regardless of what was
6856 * specified.)
6857 */
6858 static uint_t
6859 dtrace_hash_str(const char *p)
6860 {
6861 unsigned int g;
6862 uint_t hval = 0;
6863
6864 while (*p) {
6865 hval = (hval << 4) + *p++;
6866 if ((g = (hval & 0xf0000000)) != 0)
6867 hval ^= g >> 24;
6868 hval &= ~g;
6869 }
6870 return (hval);
6871 }
6872
6873 static dtrace_hash_t *
6874 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6875 {
6876 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6877
6878 hash->dth_stroffs = stroffs;
6879 hash->dth_nextoffs = nextoffs;
6880 hash->dth_prevoffs = prevoffs;
6881
6882 hash->dth_size = 1;
6883 hash->dth_mask = hash->dth_size - 1;
6884
6885 hash->dth_tab = kmem_zalloc(hash->dth_size *
6886 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6887
6888 return (hash);
6889 }
6890
6891 static void
6892 dtrace_hash_destroy(dtrace_hash_t *hash)
6893 {
6894 #ifdef DEBUG
6895 int i;
6896
6897 for (i = 0; i < hash->dth_size; i++)
6898 ASSERT(hash->dth_tab[i] == NULL);
6899 #endif
6900
6901 kmem_free(hash->dth_tab,
6902 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6903 kmem_free(hash, sizeof (dtrace_hash_t));
6904 }
6905
6906 static void
6907 dtrace_hash_resize(dtrace_hash_t *hash)
6908 {
6909 int size = hash->dth_size, i, ndx;
6910 int new_size = hash->dth_size << 1;
6911 int new_mask = new_size - 1;
6912 dtrace_hashbucket_t **new_tab, *bucket, *next;
6913
6914 ASSERT((new_size & new_mask) == 0);
6915
6916 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6917
6918 for (i = 0; i < size; i++) {
6919 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6920 dtrace_probe_t *probe = bucket->dthb_chain;
6921
6922 ASSERT(probe != NULL);
6923 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6924
6925 next = bucket->dthb_next;
6926 bucket->dthb_next = new_tab[ndx];
6927 new_tab[ndx] = bucket;
6928 }
6929 }
6930
6931 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6932 hash->dth_tab = new_tab;
6933 hash->dth_size = new_size;
6934 hash->dth_mask = new_mask;
6935 }
6936
6937 static void
6938 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6939 {
6940 int hashval = DTRACE_HASHSTR(hash, new);
6941 int ndx = hashval & hash->dth_mask;
6942 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6943 dtrace_probe_t **nextp, **prevp;
6944
6945 for (; bucket != NULL; bucket = bucket->dthb_next) {
6946 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6947 goto add;
6948 }
6949
6950 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6951 dtrace_hash_resize(hash);
6952 dtrace_hash_add(hash, new);
6953 return;
6954 }
6955
6956 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6957 bucket->dthb_next
= hash->dth_tab[ndx]; 6958 hash->dth_tab[ndx] = bucket; 6959 hash->dth_nbuckets++; 6960 6961 add: 6962 nextp = DTRACE_HASHNEXT(hash, new); 6963 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6964 *nextp = bucket->dthb_chain; 6965 6966 if (bucket->dthb_chain != NULL) { 6967 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6968 ASSERT(*prevp == NULL); 6969 *prevp = new; 6970 } 6971 6972 bucket->dthb_chain = new; 6973 bucket->dthb_len++; 6974 } 6975 6976 static dtrace_probe_t * 6977 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6978 { 6979 int hashval = DTRACE_HASHSTR(hash, template); 6980 int ndx = hashval & hash->dth_mask; 6981 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6982 6983 for (; bucket != NULL; bucket = bucket->dthb_next) { 6984 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6985 return (bucket->dthb_chain); 6986 } 6987 6988 return (NULL); 6989 } 6990 6991 static int 6992 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6993 { 6994 int hashval = DTRACE_HASHSTR(hash, template); 6995 int ndx = hashval & hash->dth_mask; 6996 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6997 6998 for (; bucket != NULL; bucket = bucket->dthb_next) { 6999 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7000 return (bucket->dthb_len); 7001 } 7002 7003 return (0); 7004 } 7005 7006 static void 7007 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 7008 { 7009 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 7010 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7011 7012 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 7013 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 7014 7015 /* 7016 * Find the bucket that we're removing this probe from. 7017 */ 7018 for (; bucket != NULL; bucket = bucket->dthb_next) { 7019 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 7020 break; 7021 } 7022 7023 ASSERT(bucket != NULL); 7024 7025 if (*prevp == NULL) { 7026 if (*nextp == NULL) { 7027 /* 7028 * The removed probe was the only probe on this 7029 * bucket; we need to remove the bucket. 7030 */ 7031 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 7032 7033 ASSERT(bucket->dthb_chain == probe); 7034 ASSERT(b != NULL); 7035 7036 if (b == bucket) { 7037 hash->dth_tab[ndx] = bucket->dthb_next; 7038 } else { 7039 while (b->dthb_next != bucket) 7040 b = b->dthb_next; 7041 b->dthb_next = bucket->dthb_next; 7042 } 7043 7044 ASSERT(hash->dth_nbuckets > 0); 7045 hash->dth_nbuckets--; 7046 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 7047 return; 7048 } 7049 7050 bucket->dthb_chain = *nextp; 7051 } else { 7052 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 7053 } 7054 7055 if (*nextp != NULL) 7056 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 7057 } 7058 7059 /* 7060 * DTrace Utility Functions 7061 * 7062 * These are random utility functions that are _not_ called from probe context. 7063 */ 7064 static int 7065 dtrace_badattr(const dtrace_attribute_t *a) 7066 { 7067 return (a->dtat_name > DTRACE_STABILITY_MAX || 7068 a->dtat_data > DTRACE_STABILITY_MAX || 7069 a->dtat_class > DTRACE_CLASS_MAX); 7070 } 7071 7072 /* 7073 * Return a duplicate copy of a string. If the specified string is NULL, 7074 * this function returns a zero-length string. 7075 */ 7076 static char * 7077 dtrace_strdup(const char *str) 7078 { 7079 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 7080 7081 if (str != NULL) 7082 (void) strcpy(new, str); 7083 7084 return (new); 7085 } 7086 7087 #define DTRACE_ISALPHA(c) \ 7088 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 7089 7090 static int 7091 dtrace_badname(const char *s) 7092 { 7093 char c; 7094 7095 if (s == NULL || (c = *s++) == '\0') 7096 return (0); 7097 7098 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 7099 return (1); 7100 7101 while ((c = *s++) != '\0') { 7102 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7103 c != '-' && c != '_' && c != '.' && c != '`') 7104 return (1); 7105 } 7106 7107 return (0); 7108 } 7109 7110 static void 7111 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7112 { 7113 uint32_t priv; 7114 7115 #if defined(sun) 7116 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7117 /* 7118 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 7119 */ 7120 priv = DTRACE_PRIV_ALL; 7121 } else { 7122 *uidp = crgetuid(cr); 7123 *zoneidp = crgetzoneid(cr); 7124 7125 priv = 0; 7126 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7127 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7128 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7129 priv |= DTRACE_PRIV_USER; 7130 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7131 priv |= DTRACE_PRIV_PROC; 7132 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7133 priv |= DTRACE_PRIV_OWNER; 7134 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7135 priv |= DTRACE_PRIV_ZONEOWNER; 7136 } 7137 #else 7138 priv = DTRACE_PRIV_ALL; 7139 #endif 7140 7141 *privp = priv; 7142 } 7143 7144 #ifdef DTRACE_ERRDEBUG 7145 static void 7146 dtrace_errdebug(const char *str) 7147 { 7148 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7149 int occupied = 0; 7150 7151 mutex_enter(&dtrace_errlock); 7152 dtrace_errlast = str; 7153 dtrace_errthread = curthread; 7154 7155 while (occupied++ < DTRACE_ERRHASHSZ) { 7156 if (dtrace_errhash[hval].dter_msg == str) { 7157 dtrace_errhash[hval].dter_count++; 7158 goto out; 7159 } 7160 7161 if (dtrace_errhash[hval].dter_msg != NULL) { 7162 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7163 continue; 7164 } 7165 7166 dtrace_errhash[hval].dter_msg = str; 7167 dtrace_errhash[hval].dter_count = 1; 7168 goto out; 7169 } 7170 7171 panic("dtrace: undersized error hash"); 7172 out: 7173 mutex_exit(&dtrace_errlock); 7174 } 7175 #endif 7176 7177 /* 7178 * DTrace Matching Functions 7179 * 7180 * These functions are used to match groups of probes, given some elements of 7181 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7182 */ 7183 static int 7184 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7185 zoneid_t zoneid) 7186 { 7187 if (priv != DTRACE_PRIV_ALL) { 7188 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7189 uint32_t match = priv & ppriv; 7190 7191 /* 7192 * No PRIV_DTRACE_* privileges... 7193 */ 7194 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7195 DTRACE_PRIV_KERNEL)) == 0) 7196 return (0); 7197 7198 /* 7199 * No matching bits, but there were bits to match... 7200 */ 7201 if (match == 0 && ppriv != 0) 7202 return (0); 7203 7204 /* 7205 * Need to have permissions to the process, but don't... 7206 */ 7207 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7208 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7209 return (0); 7210 } 7211 7212 /* 7213 * Need to be in the same zone unless we possess the 7214 * privilege to examine all zones. 
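 *
 * A hypothetical example (ids invented for illustration): a consumer
 * with uid 100 in zone 2 whose credential lacks PRIV_PROC_OWNER and
 * PRIV_PROC_ZONE matches a provider registered with
 * DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER only if that provider's
 * dtpp_uid is 100 and its dtpp_zoneid is 2; holding either privilege
 * lifts the corresponding check.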
7215 */ 7216 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7217 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7218 return (0); 7219 } 7220 } 7221 7222 return (1); 7223 } 7224 7225 /* 7226 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7227 * consists of input pattern strings and an ops-vector to evaluate them. 7228 * This function returns >0 for match, 0 for no match, and <0 for error. 7229 */ 7230 static int 7231 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7232 uint32_t priv, uid_t uid, zoneid_t zoneid) 7233 { 7234 dtrace_provider_t *pvp = prp->dtpr_provider; 7235 int rv; 7236 7237 if (pvp->dtpv_defunct) 7238 return (0); 7239 7240 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7241 return (rv); 7242 7243 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7244 return (rv); 7245 7246 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7247 return (rv); 7248 7249 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7250 return (rv); 7251 7252 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7253 return (0); 7254 7255 return (rv); 7256 } 7257 7258 /* 7259 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7260 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7261 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7262 * In addition, all of the recursion cases except for '*' matching have been 7263 * unwound. For '*', we still implement recursive evaluation, but a depth 7264 * counter is maintained and matching is aborted if we recurse too deep. 7265 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7266 */ 7267 static int 7268 dtrace_match_glob(const char *s, const char *p, int depth) 7269 { 7270 const char *olds; 7271 char s1, c; 7272 int gs; 7273 7274 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7275 return (-1); 7276 7277 if (s == NULL) 7278 s = ""; /* treat NULL as empty string */ 7279 7280 top: 7281 olds = s; 7282 s1 = *s++; 7283 7284 if (p == NULL) 7285 return (0); 7286 7287 if ((c = *p++) == '\0') 7288 return (s1 == '\0'); 7289 7290 switch (c) { 7291 case '[': { 7292 int ok = 0, notflag = 0; 7293 char lc = '\0'; 7294 7295 if (s1 == '\0') 7296 return (0); 7297 7298 if (*p == '!') { 7299 notflag = 1; 7300 p++; 7301 } 7302 7303 if ((c = *p++) == '\0') 7304 return (0); 7305 7306 do { 7307 if (c == '-' && lc != '\0' && *p != ']') { 7308 if ((c = *p++) == '\0') 7309 return (0); 7310 if (c == '\\' && (c = *p++) == '\0') 7311 return (0); 7312 7313 if (notflag) { 7314 if (s1 < lc || s1 > c) 7315 ok++; 7316 else 7317 return (0); 7318 } else if (lc <= s1 && s1 <= c) 7319 ok++; 7320 7321 } else if (c == '\\' && (c = *p++) == '\0') 7322 return (0); 7323 7324 lc = c; /* save left-hand 'c' for next iteration */ 7325 7326 if (notflag) { 7327 if (s1 != c) 7328 ok++; 7329 else 7330 return (0); 7331 } else if (s1 == c) 7332 ok++; 7333 7334 if ((c = *p++) == '\0') 7335 return (0); 7336 7337 } while (c != ']'); 7338 7339 if (ok) 7340 goto top; 7341 7342 return (0); 7343 } 7344 7345 case '\\': 7346 if ((c = *p++) == '\0') 7347 return (0); 7348 /*FALLTHRU*/ 7349 7350 default: 7351 if (c != s1) 7352 return (0); 7353 /*FALLTHRU*/ 7354 7355 case '?': 7356 if (s1 != '\0') 7357 goto top; 7358 return (0); 7359 7360 case '*': 7361 while (*p == '*') 7362 p++; /* consecutive *'s are identical to a single one */ 7363 7364 if (*p == '\0') 7365 return (1); 7366 7367 for (s = 
olds; *s != '\0'; s++) { 7368 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7369 return (gs); 7370 } 7371 7372 return (0); 7373 } 7374 } 7375 7376 /*ARGSUSED*/ 7377 static int 7378 dtrace_match_string(const char *s, const char *p, int depth) 7379 { 7380 return (s != NULL && strcmp(s, p) == 0); 7381 } 7382 7383 /*ARGSUSED*/ 7384 static int 7385 dtrace_match_nul(const char *s, const char *p, int depth) 7386 { 7387 return (1); /* always match the empty pattern */ 7388 } 7389 7390 /*ARGSUSED*/ 7391 static int 7392 dtrace_match_nonzero(const char *s, const char *p, int depth) 7393 { 7394 return (s != NULL && s[0] != '\0'); 7395 } 7396 7397 static int 7398 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7399 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7400 { 7401 dtrace_probe_t template, *probe; 7402 dtrace_hash_t *hash = NULL; 7403 int len, best = INT_MAX, nmatched = 0; 7404 dtrace_id_t i; 7405 7406 ASSERT(MUTEX_HELD(&dtrace_lock)); 7407 7408 /* 7409 * If the probe ID is specified in the key, just lookup by ID and 7410 * invoke the match callback once if a matching probe is found. 7411 */ 7412 if (pkp->dtpk_id != DTRACE_IDNONE) { 7413 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7414 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7415 (void) (*matched)(probe, arg); 7416 nmatched++; 7417 } 7418 return (nmatched); 7419 } 7420 7421 template.dtpr_mod = (char *)pkp->dtpk_mod; 7422 template.dtpr_func = (char *)pkp->dtpk_func; 7423 template.dtpr_name = (char *)pkp->dtpk_name; 7424 7425 /* 7426 * We want to find the most distinct of the module name, function 7427 * name, and name. So for each one that is not a glob pattern or 7428 * empty string, we perform a lookup in the corresponding hash and 7429 * use the hash table with the fewest collisions to do our search. 7430 */ 7431 if (pkp->dtpk_mmatch == &dtrace_match_string && 7432 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7433 best = len; 7434 hash = dtrace_bymod; 7435 } 7436 7437 if (pkp->dtpk_fmatch == &dtrace_match_string && 7438 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7439 best = len; 7440 hash = dtrace_byfunc; 7441 } 7442 7443 if (pkp->dtpk_nmatch == &dtrace_match_string && 7444 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7445 best = len; 7446 hash = dtrace_byname; 7447 } 7448 7449 /* 7450 * If we did not select a hash table, iterate over every probe and 7451 * invoke our callback for each one that matches our input probe key. 7452 */ 7453 if (hash == NULL) { 7454 for (i = 0; i < dtrace_nprobes; i++) { 7455 if ((probe = dtrace_probes[i]) == NULL || 7456 dtrace_match_probe(probe, pkp, priv, uid, 7457 zoneid) <= 0) 7458 continue; 7459 7460 nmatched++; 7461 7462 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7463 break; 7464 } 7465 7466 return (nmatched); 7467 } 7468 7469 /* 7470 * If we selected a hash table, iterate over each probe of the same key 7471 * name and invoke the callback for every probe that matches the other 7472 * attributes of our input probe key. 
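 *
 * To illustrate with a hypothetical description: for
 * "syscall::read:entry", both the function and name hashes are
 * eligible; if "entry" hashes to a longer chain than "read", the walk
 * below proceeds over the dtrace_byfunc chain for "read", and
 * dtrace_match_probe() filters on the remaining components of the key.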
7473 */
7474 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7475 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7476
7477 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7478 continue;
7479
7480 nmatched++;
7481
7482 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7483 break;
7484 }
7485
7486 return (nmatched);
7487 }
7488
7489 /*
7490 * Return the function pointer dtrace_match_probe() should use to compare the
7491 * specified pattern with a string. For NULL or empty patterns, we select
7492 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7493 * For non-empty non-glob strings, we use dtrace_match_string().
7494 */
7495 static dtrace_probekey_f *
7496 dtrace_probekey_func(const char *p)
7497 {
7498 char c;
7499
7500 if (p == NULL || *p == '\0')
7501 return (&dtrace_match_nul);
7502
7503 while ((c = *p++) != '\0') {
7504 if (c == '[' || c == '?' || c == '*' || c == '\\')
7505 return (&dtrace_match_glob);
7506 }
7507
7508 return (&dtrace_match_string);
7509 }
7510
7511 /*
7512 * Build a probe comparison key for use with dtrace_match_probe() from the
7513 * given probe description. By convention, a null key only matches anchored
7514 * probes: if each field is the empty string, reset dtpk_fmatch to
7515 * dtrace_match_nonzero().
7516 */
7517 static void
7518 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7519 {
7520 pkp->dtpk_prov = pdp->dtpd_provider;
7521 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7522
7523 pkp->dtpk_mod = pdp->dtpd_mod;
7524 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7525
7526 pkp->dtpk_func = pdp->dtpd_func;
7527 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7528
7529 pkp->dtpk_name = pdp->dtpd_name;
7530 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7531
7532 pkp->dtpk_id = pdp->dtpd_id;
7533
7534 if (pkp->dtpk_id == DTRACE_IDNONE &&
7535 pkp->dtpk_pmatch == &dtrace_match_nul &&
7536 pkp->dtpk_mmatch == &dtrace_match_nul &&
7537 pkp->dtpk_fmatch == &dtrace_match_nul &&
7538 pkp->dtpk_nmatch == &dtrace_match_nul)
7539 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7540 }
7541
7542 /*
7543 * DTrace Provider-to-Framework API Functions
7544 *
7545 * These functions implement much of the Provider-to-Framework API, as
7546 * described in <sys/dtrace.h>. The parts of the API not in this section are
7547 * the functions in the API for probe management (found below), and
7548 * dtrace_probe() itself (found above).
7549 */
7550
7551 /*
7552 * Register the calling provider with the DTrace framework. This should
7553 * generally be called by DTrace providers in their attach(9E) entry point.
7554 */
7555 int
7556 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7557 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7558 {
7559 dtrace_provider_t *provider;
7560
7561 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7562 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7563 "arguments", name ?
name : "<NULL>"); 7564 return (EINVAL); 7565 } 7566 7567 if (name[0] == '\0' || dtrace_badname(name)) { 7568 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7569 "provider name", name); 7570 return (EINVAL); 7571 } 7572 7573 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7574 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7575 pops->dtps_destroy == NULL || 7576 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7577 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7578 "provider ops", name); 7579 return (EINVAL); 7580 } 7581 7582 if (dtrace_badattr(&pap->dtpa_provider) || 7583 dtrace_badattr(&pap->dtpa_mod) || 7584 dtrace_badattr(&pap->dtpa_func) || 7585 dtrace_badattr(&pap->dtpa_name) || 7586 dtrace_badattr(&pap->dtpa_args)) { 7587 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7588 "provider attributes", name); 7589 return (EINVAL); 7590 } 7591 7592 if (priv & ~DTRACE_PRIV_ALL) { 7593 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7594 "privilege attributes", name); 7595 return (EINVAL); 7596 } 7597 7598 if ((priv & DTRACE_PRIV_KERNEL) && 7599 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7600 pops->dtps_usermode == NULL) { 7601 cmn_err(CE_WARN, "failed to register provider '%s': need " 7602 "dtps_usermode() op for given privilege attributes", name); 7603 return (EINVAL); 7604 } 7605 7606 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7607 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7608 (void) strcpy(provider->dtpv_name, name); 7609 7610 provider->dtpv_attr = *pap; 7611 provider->dtpv_priv.dtpp_flags = priv; 7612 if (cr != NULL) { 7613 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7614 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7615 } 7616 provider->dtpv_pops = *pops; 7617 7618 if (pops->dtps_provide == NULL) { 7619 ASSERT(pops->dtps_provide_module != NULL); 7620 provider->dtpv_pops.dtps_provide = 7621 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7622 } 7623 7624 if (pops->dtps_provide_module == NULL) { 7625 ASSERT(pops->dtps_provide != NULL); 7626 provider->dtpv_pops.dtps_provide_module = 7627 (void (*)(void *, modctl_t *))dtrace_nullop; 7628 } 7629 7630 if (pops->dtps_suspend == NULL) { 7631 ASSERT(pops->dtps_resume == NULL); 7632 provider->dtpv_pops.dtps_suspend = 7633 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7634 provider->dtpv_pops.dtps_resume = 7635 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7636 } 7637 7638 provider->dtpv_arg = arg; 7639 *idp = (dtrace_provider_id_t)provider; 7640 7641 if (pops == &dtrace_provider_ops) { 7642 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7643 ASSERT(MUTEX_HELD(&dtrace_lock)); 7644 ASSERT(dtrace_anon.dta_enabling == NULL); 7645 7646 /* 7647 * We make sure that the DTrace provider is at the head of 7648 * the provider chain. 7649 */ 7650 provider->dtpv_next = dtrace_provider; 7651 dtrace_provider = provider; 7652 return (0); 7653 } 7654 7655 mutex_enter(&dtrace_provider_lock); 7656 mutex_enter(&dtrace_lock); 7657 7658 /* 7659 * If there is at least one provider registered, we'll add this 7660 * provider after the first provider. 
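 *
 * (That is, the chain stays rooted at the dtrace provider itself:
 * registering "fbt" and then "syscall" yields, schematically,
 * dtrace -> syscall -> fbt. The head position of the dtrace provider
 * is relied upon elsewhere, e.g. by the self-unregistration path in
 * dtrace_unregister().)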
7661 */ 7662 if (dtrace_provider != NULL) { 7663 provider->dtpv_next = dtrace_provider->dtpv_next; 7664 dtrace_provider->dtpv_next = provider; 7665 } else { 7666 dtrace_provider = provider; 7667 } 7668 7669 if (dtrace_retained != NULL) { 7670 dtrace_enabling_provide(provider); 7671 7672 /* 7673 * Now we need to call dtrace_enabling_matchall() -- which 7674 * will acquire cpu_lock and dtrace_lock. We therefore need 7675 * to drop all of our locks before calling into it... 7676 */ 7677 mutex_exit(&dtrace_lock); 7678 mutex_exit(&dtrace_provider_lock); 7679 dtrace_enabling_matchall(); 7680 7681 return (0); 7682 } 7683 7684 mutex_exit(&dtrace_lock); 7685 mutex_exit(&dtrace_provider_lock); 7686 7687 return (0); 7688 } 7689 7690 /* 7691 * Unregister the specified provider from the DTrace framework. This should 7692 * generally be called by DTrace providers in their detach(9E) entry point. 7693 */ 7694 int 7695 dtrace_unregister(dtrace_provider_id_t id) 7696 { 7697 dtrace_provider_t *old = (dtrace_provider_t *)id; 7698 dtrace_provider_t *prev = NULL; 7699 int i, self = 0, noreap = 0; 7700 dtrace_probe_t *probe, *first = NULL; 7701 7702 if (old->dtpv_pops.dtps_enable == 7703 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7704 /* 7705 * If DTrace itself is the provider, we're called with locks 7706 * already held. 7707 */ 7708 ASSERT(old == dtrace_provider); 7709 #if defined(sun) 7710 ASSERT(dtrace_devi != NULL); 7711 #endif 7712 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7713 ASSERT(MUTEX_HELD(&dtrace_lock)); 7714 self = 1; 7715 7716 if (dtrace_provider->dtpv_next != NULL) { 7717 /* 7718 * There's another provider here; return failure. 7719 */ 7720 return (EBUSY); 7721 } 7722 } else { 7723 mutex_enter(&dtrace_provider_lock); 7724 #if defined(sun) 7725 mutex_enter(&mod_lock); 7726 #endif 7727 mutex_enter(&dtrace_lock); 7728 } 7729 7730 /* 7731 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7732 * probes, we refuse to let providers slither away, unless this 7733 * provider has already been explicitly invalidated. 7734 */ 7735 if (!old->dtpv_defunct && 7736 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7737 dtrace_anon.dta_state->dts_necbs > 0))) { 7738 if (!self) { 7739 mutex_exit(&dtrace_lock); 7740 #if defined(sun) 7741 mutex_exit(&mod_lock); 7742 #endif 7743 mutex_exit(&dtrace_provider_lock); 7744 } 7745 return (EBUSY); 7746 } 7747 7748 /* 7749 * Attempt to destroy the probes associated with this provider. 7750 */ 7751 for (i = 0; i < dtrace_nprobes; i++) { 7752 if ((probe = dtrace_probes[i]) == NULL) 7753 continue; 7754 7755 if (probe->dtpr_provider != old) 7756 continue; 7757 7758 if (probe->dtpr_ecb == NULL) 7759 continue; 7760 7761 /* 7762 * If we are trying to unregister a defunct provider, and the 7763 * provider was made defunct within the interval dictated by 7764 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7765 * attempt to reap our enablings. To denote that the provider 7766 * should reattempt to unregister itself at some point in the 7767 * future, we will return a differentiable error code (EAGAIN 7768 * instead of EBUSY) in this case. 
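 *
 * A provider's detach(9E) routine might act on the distinction along
 * these lines (a hypothetical sketch, not an interface defined here;
 * example_retry_detach is a placeholder):
 *
 *	error = dtrace_unregister(id);
 *	if (error == EAGAIN) {
 *		(void) timeout(example_retry_detach, arg, hz);
 *		return (DDI_FAILURE);
 *	}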
7769 */ 7770 if (dtrace_gethrtime() - old->dtpv_defunct > 7771 dtrace_unregister_defunct_reap) 7772 noreap = 1; 7773 7774 if (!self) { 7775 mutex_exit(&dtrace_lock); 7776 #if defined(sun) 7777 mutex_exit(&mod_lock); 7778 #endif 7779 mutex_exit(&dtrace_provider_lock); 7780 } 7781 7782 if (noreap) 7783 return (EBUSY); 7784 7785 (void) taskq_dispatch(dtrace_taskq, 7786 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7787 7788 return (EAGAIN); 7789 } 7790 7791 /* 7792 * All of the probes for this provider are disabled; we can safely 7793 * remove all of them from their hash chains and from the probe array. 7794 */ 7795 for (i = 0; i < dtrace_nprobes; i++) { 7796 if ((probe = dtrace_probes[i]) == NULL) 7797 continue; 7798 7799 if (probe->dtpr_provider != old) 7800 continue; 7801 7802 dtrace_probes[i] = NULL; 7803 7804 dtrace_hash_remove(dtrace_bymod, probe); 7805 dtrace_hash_remove(dtrace_byfunc, probe); 7806 dtrace_hash_remove(dtrace_byname, probe); 7807 7808 if (first == NULL) { 7809 first = probe; 7810 probe->dtpr_nextmod = NULL; 7811 } else { 7812 probe->dtpr_nextmod = first; 7813 first = probe; 7814 } 7815 } 7816 7817 /* 7818 * The provider's probes have been removed from the hash chains and 7819 * from the probe array. Now issue a dtrace_sync() to be sure that 7820 * everyone has cleared out from any probe array processing. 7821 */ 7822 dtrace_sync(); 7823 7824 for (probe = first; probe != NULL; probe = first) { 7825 first = probe->dtpr_nextmod; 7826 7827 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7828 probe->dtpr_arg); 7829 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7830 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7831 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7832 #if defined(sun) 7833 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7834 #else 7835 free_unr(dtrace_arena, probe->dtpr_id); 7836 #endif 7837 kmem_free(probe, sizeof (dtrace_probe_t)); 7838 } 7839 7840 if ((prev = dtrace_provider) == old) { 7841 #if defined(sun) 7842 ASSERT(self || dtrace_devi == NULL); 7843 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7844 #endif 7845 dtrace_provider = old->dtpv_next; 7846 } else { 7847 while (prev != NULL && prev->dtpv_next != old) 7848 prev = prev->dtpv_next; 7849 7850 if (prev == NULL) { 7851 panic("attempt to unregister non-existent " 7852 "dtrace provider %p\n", (void *)id); 7853 } 7854 7855 prev->dtpv_next = old->dtpv_next; 7856 } 7857 7858 if (!self) { 7859 mutex_exit(&dtrace_lock); 7860 #if defined(sun) 7861 mutex_exit(&mod_lock); 7862 #endif 7863 mutex_exit(&dtrace_provider_lock); 7864 } 7865 7866 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7867 kmem_free(old, sizeof (dtrace_provider_t)); 7868 7869 return (0); 7870 } 7871 7872 /* 7873 * Invalidate the specified provider. All subsequent probe lookups for the 7874 * specified provider will fail, but its probes will not be removed. 7875 */ 7876 void 7877 dtrace_invalidate(dtrace_provider_id_t id) 7878 { 7879 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7880 7881 ASSERT(pvp->dtpv_pops.dtps_enable != 7882 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7883 7884 mutex_enter(&dtrace_provider_lock); 7885 mutex_enter(&dtrace_lock); 7886 7887 pvp->dtpv_defunct = dtrace_gethrtime(); 7888 7889 mutex_exit(&dtrace_lock); 7890 mutex_exit(&dtrace_provider_lock); 7891 } 7892 7893 /* 7894 * Indicate whether or not DTrace has attached. 
7895 */ 7896 int 7897 dtrace_attached(void) 7898 { 7899 /* 7900 * dtrace_provider will be non-NULL iff the DTrace driver has 7901 * attached. (It's non-NULL because DTrace is always itself a 7902 * provider.) 7903 */ 7904 return (dtrace_provider != NULL); 7905 } 7906 7907 /* 7908 * Remove all the unenabled probes for the given provider. This function is 7909 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7910 * -- just as many of its associated probes as it can. 7911 */ 7912 int 7913 dtrace_condense(dtrace_provider_id_t id) 7914 { 7915 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7916 int i; 7917 dtrace_probe_t *probe; 7918 7919 /* 7920 * Make sure this isn't the dtrace provider itself. 7921 */ 7922 ASSERT(prov->dtpv_pops.dtps_enable != 7923 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7924 7925 mutex_enter(&dtrace_provider_lock); 7926 mutex_enter(&dtrace_lock); 7927 7928 /* 7929 * Attempt to destroy the probes associated with this provider. 7930 */ 7931 for (i = 0; i < dtrace_nprobes; i++) { 7932 if ((probe = dtrace_probes[i]) == NULL) 7933 continue; 7934 7935 if (probe->dtpr_provider != prov) 7936 continue; 7937 7938 if (probe->dtpr_ecb != NULL) 7939 continue; 7940 7941 dtrace_probes[i] = NULL; 7942 7943 dtrace_hash_remove(dtrace_bymod, probe); 7944 dtrace_hash_remove(dtrace_byfunc, probe); 7945 dtrace_hash_remove(dtrace_byname, probe); 7946 7947 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7948 probe->dtpr_arg); 7949 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7950 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7951 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7952 kmem_free(probe, sizeof (dtrace_probe_t)); 7953 #if defined(sun) 7954 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7955 #else 7956 free_unr(dtrace_arena, i + 1); 7957 #endif 7958 } 7959 7960 mutex_exit(&dtrace_lock); 7961 mutex_exit(&dtrace_provider_lock); 7962 7963 return (0); 7964 } 7965 7966 /* 7967 * DTrace Probe Management Functions 7968 * 7969 * The functions in this section perform the DTrace probe management, 7970 * including functions to create probes, look-up probes, and call into the 7971 * providers to request that probes be provided. Some of these functions are 7972 * in the Provider-to-Framework API; these functions can be identified by the 7973 * fact that they are not declared "static". 7974 */ 7975 7976 /* 7977 * Create a probe with the specified module name, function name, and name. 
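 *
 * A typical use from a provider's dtps_provide() implementation (a
 * hypothetical sketch; the "example" identifiers are placeholders):
 *
 *	if (dtrace_probe_lookup(example_id, "example", "func", "entry") == 0)
 *		(void) dtrace_probe_create(example_id, "example", "func",
 *		    "entry", 0, NULL);
 *
 * The lookup-before-create idiom keeps dtps_provide() idempotent,
 * since the framework may ask for the same probes repeatedly.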
7978 */ 7979 dtrace_id_t 7980 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7981 const char *func, const char *name, int aframes, void *arg) 7982 { 7983 dtrace_probe_t *probe, **probes; 7984 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7985 dtrace_id_t id; 7986 7987 if (provider == dtrace_provider) { 7988 ASSERT(MUTEX_HELD(&dtrace_lock)); 7989 } else { 7990 mutex_enter(&dtrace_lock); 7991 } 7992 7993 #if defined(sun) 7994 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7995 VM_BESTFIT | VM_SLEEP); 7996 #else 7997 id = alloc_unr(dtrace_arena); 7998 #endif 7999 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 8000 8001 probe->dtpr_id = id; 8002 probe->dtpr_gen = dtrace_probegen++; 8003 probe->dtpr_mod = dtrace_strdup(mod); 8004 probe->dtpr_func = dtrace_strdup(func); 8005 probe->dtpr_name = dtrace_strdup(name); 8006 probe->dtpr_arg = arg; 8007 probe->dtpr_aframes = aframes; 8008 probe->dtpr_provider = provider; 8009 8010 dtrace_hash_add(dtrace_bymod, probe); 8011 dtrace_hash_add(dtrace_byfunc, probe); 8012 dtrace_hash_add(dtrace_byname, probe); 8013 8014 if (id - 1 >= dtrace_nprobes) { 8015 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 8016 size_t nsize = osize << 1; 8017 8018 if (nsize == 0) { 8019 ASSERT(osize == 0); 8020 ASSERT(dtrace_probes == NULL); 8021 nsize = sizeof (dtrace_probe_t *); 8022 } 8023 8024 probes = kmem_zalloc(nsize, KM_SLEEP); 8025 8026 if (dtrace_probes == NULL) { 8027 ASSERT(osize == 0); 8028 dtrace_probes = probes; 8029 dtrace_nprobes = 1; 8030 } else { 8031 dtrace_probe_t **oprobes = dtrace_probes; 8032 8033 bcopy(oprobes, probes, osize); 8034 dtrace_membar_producer(); 8035 dtrace_probes = probes; 8036 8037 dtrace_sync(); 8038 8039 /* 8040 * All CPUs are now seeing the new probes array; we can 8041 * safely free the old array. 8042 */ 8043 kmem_free(oprobes, osize); 8044 dtrace_nprobes <<= 1; 8045 } 8046 8047 ASSERT(id - 1 < dtrace_nprobes); 8048 } 8049 8050 ASSERT(dtrace_probes[id - 1] == NULL); 8051 dtrace_probes[id - 1] = probe; 8052 8053 if (provider != dtrace_provider) 8054 mutex_exit(&dtrace_lock); 8055 8056 return (id); 8057 } 8058 8059 static dtrace_probe_t * 8060 dtrace_probe_lookup_id(dtrace_id_t id) 8061 { 8062 ASSERT(MUTEX_HELD(&dtrace_lock)); 8063 8064 if (id == 0 || id > dtrace_nprobes) 8065 return (NULL); 8066 8067 return (dtrace_probes[id - 1]); 8068 } 8069 8070 static int 8071 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 8072 { 8073 *((dtrace_id_t *)arg) = probe->dtpr_id; 8074 8075 return (DTRACE_MATCH_DONE); 8076 } 8077 8078 /* 8079 * Look up a probe based on provider and one or more of module name, function 8080 * name and probe name. 8081 */ 8082 dtrace_id_t 8083 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 8084 char *func, char *name) 8085 { 8086 dtrace_probekey_t pkey; 8087 dtrace_id_t id; 8088 int match; 8089 8090 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 8091 pkey.dtpk_pmatch = &dtrace_match_string; 8092 pkey.dtpk_mod = mod; 8093 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 8094 pkey.dtpk_func = func; 8095 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 8096 pkey.dtpk_name = name; 8097 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul;
8098 pkey.dtpk_id = DTRACE_IDNONE;
8099
8100 mutex_enter(&dtrace_lock);
8101 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8102 dtrace_probe_lookup_match, &id);
8103 mutex_exit(&dtrace_lock);
8104
8105 ASSERT(match == 1 || match == 0);
8106 return (match ? id : 0);
8107 }
8108
8109 /*
8110 * Returns the probe argument associated with the specified probe.
8111 */
8112 void *
8113 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8114 {
8115 dtrace_probe_t *probe;
8116 void *rval = NULL;
8117
8118 mutex_enter(&dtrace_lock);
8119
8120 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8121 probe->dtpr_provider == (dtrace_provider_t *)id)
8122 rval = probe->dtpr_arg;
8123
8124 mutex_exit(&dtrace_lock);
8125
8126 return (rval);
8127 }
8128
8129 /*
8130 * Copy a probe into a probe description.
8131 */
8132 static void
8133 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8134 {
8135 bzero(pdp, sizeof (dtrace_probedesc_t));
8136 pdp->dtpd_id = prp->dtpr_id;
8137
8138 (void) strncpy(pdp->dtpd_provider,
8139 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8140
8141 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8142 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8143 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8144 }
8145
8146 /*
8147 * Called to indicate that a probe -- or probes -- should be provided by a
8148 * specified provider. If the specified description is NULL, the provider will
8149 * be told to provide all of its probes. (This is done whenever a new
8150 * consumer comes along, or whenever a retained enabling is to be matched.) If
8151 * the specified description is non-NULL, the provider is given the
8152 * opportunity to dynamically provide the specified probe, allowing providers
8153 * to support the creation of probes on-the-fly. (So-called _autocreated_
8154 * probes.) If the provider is NULL, the operations will be applied to all
8155 * providers; if the provider is non-NULL the operations will only be applied
8156 * to the specified provider. The dtrace_provider_lock must be held, and the
8157 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8158 * will need to grab the dtrace_lock when it reenters the framework through
8159 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8160 */
8161 static void
8162 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8163 {
8164 #if defined(sun)
8165 modctl_t *ctl;
8166 #endif
8167 int all = 0;
8168
8169 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8170
8171 if (prv == NULL) {
8172 all = 1;
8173 prv = dtrace_provider;
8174 }
8175
8176 do {
8177 /*
8178 * First, call the blanket provide operation.
8179 */
8180 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8181
8182 #if defined(sun)
8183 /*
8184 * Now call the per-module provide operation. We will grab
8185 * mod_lock to prevent the list from being modified. Note
8186 * that this also prevents the mod_busy bits from changing.
8187 * (mod_busy can only be changed with mod_lock held.)
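 * The modules list is circular, headed by the "modules" stub itself --
 * hence the do/while walk below, which terminates upon returning to
 * &modules and skips modules that are busy or have no loadable image
 * (mod_mp == NULL).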
8188 */ 8189 mutex_enter(&mod_lock); 8190 8191 ctl = &modules; 8192 do { 8193 if (ctl->mod_busy || ctl->mod_mp == NULL) 8194 continue; 8195 8196 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8197 8198 } while ((ctl = ctl->mod_next) != &modules); 8199 8200 mutex_exit(&mod_lock); 8201 #endif 8202 } while (all && (prv = prv->dtpv_next) != NULL); 8203 } 8204 8205 #if defined(sun) 8206 /* 8207 * Iterate over each probe, and call the Framework-to-Provider API function 8208 * denoted by offs. 8209 */ 8210 static void 8211 dtrace_probe_foreach(uintptr_t offs) 8212 { 8213 dtrace_provider_t *prov; 8214 void (*func)(void *, dtrace_id_t, void *); 8215 dtrace_probe_t *probe; 8216 dtrace_icookie_t cookie; 8217 int i; 8218 8219 /* 8220 * We disable interrupts to walk through the probe array. This is 8221 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8222 * won't see stale data. 8223 */ 8224 cookie = dtrace_interrupt_disable(); 8225 8226 for (i = 0; i < dtrace_nprobes; i++) { 8227 if ((probe = dtrace_probes[i]) == NULL) 8228 continue; 8229 8230 if (probe->dtpr_ecb == NULL) { 8231 /* 8232 * This probe isn't enabled -- don't call the function. 8233 */ 8234 continue; 8235 } 8236 8237 prov = probe->dtpr_provider; 8238 func = *((void(**)(void *, dtrace_id_t, void *)) 8239 ((uintptr_t)&prov->dtpv_pops + offs)); 8240 8241 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8242 } 8243 8244 dtrace_interrupt_enable(cookie); 8245 } 8246 #endif 8247 8248 static int 8249 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8250 { 8251 dtrace_probekey_t pkey; 8252 uint32_t priv; 8253 uid_t uid; 8254 zoneid_t zoneid; 8255 8256 ASSERT(MUTEX_HELD(&dtrace_lock)); 8257 dtrace_ecb_create_cache = NULL; 8258 8259 if (desc == NULL) { 8260 /* 8261 * If we're passed a NULL description, we're being asked to 8262 * create an ECB with a NULL probe. 
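 * (The resulting ECB is associated with no probe at all: it consumes
 * an EPID in this consumer's state, but dtrace_ecb_enable() treats the
 * NULL probe as a no-op.)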
8263 */ 8264 (void) dtrace_ecb_create_enable(NULL, enab); 8265 return (0); 8266 } 8267 8268 dtrace_probekey(desc, &pkey); 8269 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8270 &priv, &uid, &zoneid); 8271 8272 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8273 enab)); 8274 } 8275 8276 /* 8277 * DTrace Helper Provider Functions 8278 */ 8279 static void 8280 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8281 { 8282 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8283 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8284 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8285 } 8286 8287 static void 8288 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8289 const dof_provider_t *dofprov, char *strtab) 8290 { 8291 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8292 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8293 dofprov->dofpv_provattr); 8294 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8295 dofprov->dofpv_modattr); 8296 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8297 dofprov->dofpv_funcattr); 8298 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8299 dofprov->dofpv_nameattr); 8300 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8301 dofprov->dofpv_argsattr); 8302 } 8303 8304 static void 8305 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8306 { 8307 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8308 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8309 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8310 dof_provider_t *provider; 8311 dof_probe_t *probe; 8312 uint32_t *off, *enoff; 8313 uint8_t *arg; 8314 char *strtab; 8315 uint_t i, nprobes; 8316 dtrace_helper_provdesc_t dhpv; 8317 dtrace_helper_probedesc_t dhpb; 8318 dtrace_meta_t *meta = dtrace_meta_pid; 8319 dtrace_mops_t *mops = &meta->dtm_mops; 8320 void *parg; 8321 8322 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8323 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8324 provider->dofpv_strtab * dof->dofh_secsize); 8325 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8326 provider->dofpv_probes * dof->dofh_secsize); 8327 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8328 provider->dofpv_prargs * dof->dofh_secsize); 8329 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8330 provider->dofpv_proffs * dof->dofh_secsize); 8331 8332 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8333 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8334 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8335 enoff = NULL; 8336 8337 /* 8338 * See dtrace_helper_provider_validate(). 8339 */ 8340 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8341 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8342 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8343 provider->dofpv_prenoffs * dof->dofh_secsize); 8344 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8345 } 8346 8347 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8348 8349 /* 8350 * Create the provider. 8351 */ 8352 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8353 8354 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8355 return; 8356 8357 meta->dtm_count++; 8358 8359 /* 8360 * Create the probes. 
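 * For each dof_probe_t in the probe section, we marshal its offsets,
 * is-enabled offsets, argument mappings and types into a
 * dtrace_helper_probedesc_t and hand it to the meta provider's
 * dtms_create_probe() entry point.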
8361 */ 8362 for (i = 0; i < nprobes; i++) { 8363 probe = (dof_probe_t *)(uintptr_t)(daddr + 8364 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8365 8366 dhpb.dthpb_mod = dhp->dofhp_mod; 8367 dhpb.dthpb_func = strtab + probe->dofpr_func; 8368 dhpb.dthpb_name = strtab + probe->dofpr_name; 8369 dhpb.dthpb_base = probe->dofpr_addr; 8370 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8371 dhpb.dthpb_noffs = probe->dofpr_noffs; 8372 if (enoff != NULL) { 8373 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8374 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8375 } else { 8376 dhpb.dthpb_enoffs = NULL; 8377 dhpb.dthpb_nenoffs = 0; 8378 } 8379 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8380 dhpb.dthpb_nargc = probe->dofpr_nargc; 8381 dhpb.dthpb_xargc = probe->dofpr_xargc; 8382 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8383 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8384 8385 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8386 } 8387 } 8388 8389 static void 8390 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8391 { 8392 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8393 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8394 int i; 8395 8396 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8397 8398 for (i = 0; i < dof->dofh_secnum; i++) { 8399 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8400 dof->dofh_secoff + i * dof->dofh_secsize); 8401 8402 if (sec->dofs_type != DOF_SECT_PROVIDER) 8403 continue; 8404 8405 dtrace_helper_provide_one(dhp, sec, pid); 8406 } 8407 8408 /* 8409 * We may have just created probes, so we must now rematch against 8410 * any retained enablings. Note that this call will acquire both 8411 * cpu_lock and dtrace_lock; the fact that we are holding 8412 * dtrace_meta_lock now is what defines the ordering with respect to 8413 * these three locks. 8414 */ 8415 dtrace_enabling_matchall(); 8416 } 8417 8418 static void 8419 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8420 { 8421 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8422 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8423 dof_sec_t *str_sec; 8424 dof_provider_t *provider; 8425 char *strtab; 8426 dtrace_helper_provdesc_t dhpv; 8427 dtrace_meta_t *meta = dtrace_meta_pid; 8428 dtrace_mops_t *mops = &meta->dtm_mops; 8429 8430 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8431 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8432 provider->dofpv_strtab * dof->dofh_secsize); 8433 8434 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8435 8436 /* 8437 * Create the provider. 8438 */ 8439 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8440 8441 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8442 8443 meta->dtm_count--; 8444 } 8445 8446 static void 8447 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8448 { 8449 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8450 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8451 int i; 8452 8453 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8454 8455 for (i = 0; i < dof->dofh_secnum; i++) { 8456 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8457 dof->dofh_secoff + i * dof->dofh_secsize); 8458 8459 if (sec->dofs_type != DOF_SECT_PROVIDER) 8460 continue; 8461 8462 dtrace_helper_provider_remove_one(dhp, sec, pid); 8463 } 8464 } 8465 8466 /* 8467 * DTrace Meta Provider-to-Framework API Functions 8468 * 8469 * These functions implement the Meta Provider-to-Framework API, as described 8470 * in <sys/dtrace.h>. 
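 *
 * As an illustrative sketch (all names here are hypothetical; fasttrap
 * is the canonical consumer of this interface), a meta provider fills
 * in a dtrace_mops_t with its dtms_create_probe, dtms_provide_pid and
 * dtms_remove_pid entry points and registers it:
 *
 *    static dtrace_mops_t my_mops = {
 *        my_create_probe,
 *        my_provide_pid,
 *        my_remove_pid
 *    };
 *    static dtrace_meta_provider_id_t my_id;
 *
 *    error = dtrace_meta_register("my_meta", &my_mops, NULL, &my_id);
 *
 * A matching dtrace_meta_unregister(my_id) undoes the registration; it
 * fails with EBUSY while providers are still extant (dtm_count != 0).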
8471 */
8472 int
8473 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8474 dtrace_meta_provider_id_t *idp)
8475 {
8476 dtrace_meta_t *meta;
8477 dtrace_helpers_t *help, *next;
8478 int i;
8479
8480 *idp = DTRACE_METAPROVNONE;
8481
8482 /*
8483 * We strictly don't need the name, but we hold onto it for
8484 * debuggability. All hail error queues!
8485 */
8486 if (name == NULL) {
8487 cmn_err(CE_WARN, "failed to register meta-provider: "
8488 "invalid name");
8489 return (EINVAL);
8490 }
8491
8492 if (mops == NULL ||
8493 mops->dtms_create_probe == NULL ||
8494 mops->dtms_provide_pid == NULL ||
8495 mops->dtms_remove_pid == NULL) {
8496 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8497 "invalid ops", name);
8498 return (EINVAL);
8499 }
8500
8501 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8502 meta->dtm_mops = *mops;
8503 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8504 (void) strcpy(meta->dtm_name, name);
8505 meta->dtm_arg = arg;
8506
8507 mutex_enter(&dtrace_meta_lock);
8508 mutex_enter(&dtrace_lock);
8509
8510 if (dtrace_meta_pid != NULL) {
8511 mutex_exit(&dtrace_lock);
8512 mutex_exit(&dtrace_meta_lock);
8513 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8514 "user-land meta-provider exists", name);
8515 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8516 kmem_free(meta, sizeof (dtrace_meta_t));
8517 return (EINVAL);
8518 }
8519
8520 dtrace_meta_pid = meta;
8521 *idp = (dtrace_meta_provider_id_t)meta;
8522
8523 /*
8524 * If there are providers and probes ready to go, pass them
8525 * off to the new meta provider now.
8526 */
8527
8528 help = dtrace_deferred_pid;
8529 dtrace_deferred_pid = NULL;
8530
8531 mutex_exit(&dtrace_lock);
8532
8533 while (help != NULL) {
8534 for (i = 0; i < help->dthps_nprovs; i++) {
8535 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8536 help->dthps_pid);
8537 }
8538
8539 next = help->dthps_next;
8540 help->dthps_next = NULL;
8541 help->dthps_prev = NULL;
8542 help->dthps_deferred = 0;
8543 help = next;
8544 }
8545
8546 mutex_exit(&dtrace_meta_lock);
8547
8548 return (0);
8549 }
8550
8551 int
8552 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8553 {
8554 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8555
8556 mutex_enter(&dtrace_meta_lock);
8557 mutex_enter(&dtrace_lock);
8558
8559 if (old == dtrace_meta_pid) {
8560 pp = &dtrace_meta_pid;
8561 } else {
8562 panic("attempt to unregister non-existent "
8563 "dtrace meta-provider %p\n", (void *)old);
8564 }
8565
8566 if (old->dtm_count != 0) {
8567 mutex_exit(&dtrace_lock);
8568 mutex_exit(&dtrace_meta_lock);
8569 return (EBUSY);
8570 }
8571
8572 *pp = NULL;
8573
8574 mutex_exit(&dtrace_lock);
8575 mutex_exit(&dtrace_meta_lock);
8576
8577 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8578 kmem_free(old, sizeof (dtrace_meta_t));
8579
8580 return (0);
8581 }
8582
8583
8584 /*
8585 * DTrace DIF Object Functions
8586 */
8587 static int
8588 dtrace_difo_err(uint_t pc, const char *format, ...)
8589 {
8590 if (dtrace_err_verbose) {
8591 va_list alist;
8592
8593 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8594 va_start(alist, format);
8595 (void) vuprintf(format, alist);
8596 va_end(alist);
8597 }
8598
8599 #ifdef DTRACE_ERRDEBUG
8600 dtrace_errdebug(format);
8601 #endif
8602 return (1);
8603 }
8604
8605 /*
8606 * Validate a DTrace DIF object by checking the IR instructions. The following
8607 * rules are currently enforced by dtrace_difo_validate():
8608 *
8609 * 1.
Each instruction must have a valid opcode 8610 * 2. Each register, string, variable, or subroutine reference must be valid 8611 * 3. No instruction can modify register %r0 (must be zero) 8612 * 4. All instruction reserved bits must be set to zero 8613 * 5. The last instruction must be a "ret" instruction 8614 * 6. All branch targets must reference a valid instruction _after_ the branch 8615 */ 8616 static int 8617 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8618 cred_t *cr) 8619 { 8620 int err = 0, i; 8621 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8622 int kcheckload; 8623 uint_t pc; 8624 8625 kcheckload = cr == NULL || 8626 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8627 8628 dp->dtdo_destructive = 0; 8629 8630 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8631 dif_instr_t instr = dp->dtdo_buf[pc]; 8632 8633 uint_t r1 = DIF_INSTR_R1(instr); 8634 uint_t r2 = DIF_INSTR_R2(instr); 8635 uint_t rd = DIF_INSTR_RD(instr); 8636 uint_t rs = DIF_INSTR_RS(instr); 8637 uint_t label = DIF_INSTR_LABEL(instr); 8638 uint_t v = DIF_INSTR_VAR(instr); 8639 uint_t subr = DIF_INSTR_SUBR(instr); 8640 uint_t type = DIF_INSTR_TYPE(instr); 8641 uint_t op = DIF_INSTR_OP(instr); 8642 8643 switch (op) { 8644 case DIF_OP_OR: 8645 case DIF_OP_XOR: 8646 case DIF_OP_AND: 8647 case DIF_OP_SLL: 8648 case DIF_OP_SRL: 8649 case DIF_OP_SRA: 8650 case DIF_OP_SUB: 8651 case DIF_OP_ADD: 8652 case DIF_OP_MUL: 8653 case DIF_OP_SDIV: 8654 case DIF_OP_UDIV: 8655 case DIF_OP_SREM: 8656 case DIF_OP_UREM: 8657 case DIF_OP_COPYS: 8658 if (r1 >= nregs) 8659 err += efunc(pc, "invalid register %u\n", r1); 8660 if (r2 >= nregs) 8661 err += efunc(pc, "invalid register %u\n", r2); 8662 if (rd >= nregs) 8663 err += efunc(pc, "invalid register %u\n", rd); 8664 if (rd == 0) 8665 err += efunc(pc, "cannot write to %r0\n"); 8666 break; 8667 case DIF_OP_NOT: 8668 case DIF_OP_MOV: 8669 case DIF_OP_ALLOCS: 8670 if (r1 >= nregs) 8671 err += efunc(pc, "invalid register %u\n", r1); 8672 if (r2 != 0) 8673 err += efunc(pc, "non-zero reserved bits\n"); 8674 if (rd >= nregs) 8675 err += efunc(pc, "invalid register %u\n", rd); 8676 if (rd == 0) 8677 err += efunc(pc, "cannot write to %r0\n"); 8678 break; 8679 case DIF_OP_LDSB: 8680 case DIF_OP_LDSH: 8681 case DIF_OP_LDSW: 8682 case DIF_OP_LDUB: 8683 case DIF_OP_LDUH: 8684 case DIF_OP_LDUW: 8685 case DIF_OP_LDX: 8686 if (r1 >= nregs) 8687 err += efunc(pc, "invalid register %u\n", r1); 8688 if (r2 != 0) 8689 err += efunc(pc, "non-zero reserved bits\n"); 8690 if (rd >= nregs) 8691 err += efunc(pc, "invalid register %u\n", rd); 8692 if (rd == 0) 8693 err += efunc(pc, "cannot write to %r0\n"); 8694 if (kcheckload) 8695 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8696 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8697 break; 8698 case DIF_OP_RLDSB: 8699 case DIF_OP_RLDSH: 8700 case DIF_OP_RLDSW: 8701 case DIF_OP_RLDUB: 8702 case DIF_OP_RLDUH: 8703 case DIF_OP_RLDUW: 8704 case DIF_OP_RLDX: 8705 if (r1 >= nregs) 8706 err += efunc(pc, "invalid register %u\n", r1); 8707 if (r2 != 0) 8708 err += efunc(pc, "non-zero reserved bits\n"); 8709 if (rd >= nregs) 8710 err += efunc(pc, "invalid register %u\n", rd); 8711 if (rd == 0) 8712 err += efunc(pc, "cannot write to %r0\n"); 8713 break; 8714 case DIF_OP_ULDSB: 8715 case DIF_OP_ULDSH: 8716 case DIF_OP_ULDSW: 8717 case DIF_OP_ULDUB: 8718 case DIF_OP_ULDUH: 8719 case DIF_OP_ULDUW: 8720 case DIF_OP_ULDX: 8721 if (r1 >= nregs) 8722 err += efunc(pc, "invalid register %u\n", r1); 8723 if (r2 != 0) 8724 err += 
efunc(pc, "non-zero reserved bits\n");
8725 if (rd >= nregs)
8726 err += efunc(pc, "invalid register %u\n", rd);
8727 if (rd == 0)
8728 err += efunc(pc, "cannot write to %r0\n");
8729 break;
8730 case DIF_OP_STB:
8731 case DIF_OP_STH:
8732 case DIF_OP_STW:
8733 case DIF_OP_STX:
8734 if (r1 >= nregs)
8735 err += efunc(pc, "invalid register %u\n", r1);
8736 if (r2 != 0)
8737 err += efunc(pc, "non-zero reserved bits\n");
8738 if (rd >= nregs)
8739 err += efunc(pc, "invalid register %u\n", rd);
8740 if (rd == 0)
8741 err += efunc(pc, "cannot write to 0 address\n");
8742 break;
8743 case DIF_OP_CMP:
8744 case DIF_OP_SCMP:
8745 if (r1 >= nregs)
8746 err += efunc(pc, "invalid register %u\n", r1);
8747 if (r2 >= nregs)
8748 err += efunc(pc, "invalid register %u\n", r2);
8749 if (rd != 0)
8750 err += efunc(pc, "non-zero reserved bits\n");
8751 break;
8752 case DIF_OP_TST:
8753 if (r1 >= nregs)
8754 err += efunc(pc, "invalid register %u\n", r1);
8755 if (r2 != 0 || rd != 0)
8756 err += efunc(pc, "non-zero reserved bits\n");
8757 break;
8758 case DIF_OP_BA:
8759 case DIF_OP_BE:
8760 case DIF_OP_BNE:
8761 case DIF_OP_BG:
8762 case DIF_OP_BGU:
8763 case DIF_OP_BGE:
8764 case DIF_OP_BGEU:
8765 case DIF_OP_BL:
8766 case DIF_OP_BLU:
8767 case DIF_OP_BLE:
8768 case DIF_OP_BLEU:
8769 if (label >= dp->dtdo_len) {
8770 err += efunc(pc, "invalid branch target %u\n",
8771 label);
8772 }
8773 if (label <= pc) {
8774 err += efunc(pc, "backward branch to %u\n",
8775 label);
8776 }
8777 break;
8778 case DIF_OP_RET:
8779 if (r1 != 0 || r2 != 0)
8780 err += efunc(pc, "non-zero reserved bits\n");
8781 if (rd >= nregs)
8782 err += efunc(pc, "invalid register %u\n", rd);
8783 break;
8784 case DIF_OP_NOP:
8785 case DIF_OP_POPTS:
8786 case DIF_OP_FLUSHTS:
8787 if (r1 != 0 || r2 != 0 || rd != 0)
8788 err += efunc(pc, "non-zero reserved bits\n");
8789 break;
8790 case DIF_OP_SETX:
8791 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8792 err += efunc(pc, "invalid integer ref %u\n",
8793 DIF_INSTR_INTEGER(instr));
8794 }
8795 if (rd >= nregs)
8796 err += efunc(pc, "invalid register %u\n", rd);
8797 if (rd == 0)
8798 err += efunc(pc, "cannot write to %r0\n");
8799 break;
8800 case DIF_OP_SETS:
8801 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8802 err += efunc(pc, "invalid string ref %u\n",
8803 DIF_INSTR_STRING(instr));
8804 }
8805 if (rd >= nregs)
8806 err += efunc(pc, "invalid register %u\n", rd);
8807 if (rd == 0)
8808 err += efunc(pc, "cannot write to %r0\n");
8809 break;
8810 case DIF_OP_LDGA:
8811 case DIF_OP_LDTA:
8812 if (r1 > DIF_VAR_ARRAY_MAX)
8813 err += efunc(pc, "invalid array %u\n", r1);
8814 if (r2 >= nregs)
8815 err += efunc(pc, "invalid register %u\n", r2);
8816 if (rd >= nregs)
8817 err += efunc(pc, "invalid register %u\n", rd);
8818 if (rd == 0)
8819 err += efunc(pc, "cannot write to %r0\n");
8820 break;
8821 case DIF_OP_LDGS:
8822 case DIF_OP_LDTS:
8823 case DIF_OP_LDLS:
8824 case DIF_OP_LDGAA:
8825 case DIF_OP_LDTAA:
8826 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8827 err += efunc(pc, "invalid variable %u\n", v);
8828 if (rd >= nregs)
8829 err += efunc(pc, "invalid register %u\n", rd);
8830 if (rd == 0)
8831 err += efunc(pc, "cannot write to %r0\n");
8832 break;
8833 case DIF_OP_STGS:
8834 case DIF_OP_STTS:
8835 case DIF_OP_STLS:
8836 case DIF_OP_STGAA:
8837 case DIF_OP_STTAA:
8838 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8839 err += efunc(pc, "invalid variable %u\n", v);
8840 if (rs >= nregs)
8841 err += efunc(pc, "invalid register %u\n", rs);
8842 break;
8843 case
DIF_OP_CALL: 8844 if (subr > DIF_SUBR_MAX) 8845 err += efunc(pc, "invalid subr %u\n", subr); 8846 if (rd >= nregs) 8847 err += efunc(pc, "invalid register %u\n", rd); 8848 if (rd == 0) 8849 err += efunc(pc, "cannot write to %r0\n"); 8850 8851 if (subr == DIF_SUBR_COPYOUT || 8852 subr == DIF_SUBR_COPYOUTSTR) { 8853 dp->dtdo_destructive = 1; 8854 } 8855 break; 8856 case DIF_OP_PUSHTR: 8857 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8858 err += efunc(pc, "invalid ref type %u\n", type); 8859 if (r2 >= nregs) 8860 err += efunc(pc, "invalid register %u\n", r2); 8861 if (rs >= nregs) 8862 err += efunc(pc, "invalid register %u\n", rs); 8863 break; 8864 case DIF_OP_PUSHTV: 8865 if (type != DIF_TYPE_CTF) 8866 err += efunc(pc, "invalid val type %u\n", type); 8867 if (r2 >= nregs) 8868 err += efunc(pc, "invalid register %u\n", r2); 8869 if (rs >= nregs) 8870 err += efunc(pc, "invalid register %u\n", rs); 8871 break; 8872 default: 8873 err += efunc(pc, "invalid opcode %u\n", 8874 DIF_INSTR_OP(instr)); 8875 } 8876 } 8877 8878 if (dp->dtdo_len != 0 && 8879 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8880 err += efunc(dp->dtdo_len - 1, 8881 "expected 'ret' as last DIF instruction\n"); 8882 } 8883 8884 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8885 /* 8886 * If we're not returning by reference, the size must be either 8887 * 0 or the size of one of the base types. 8888 */ 8889 switch (dp->dtdo_rtype.dtdt_size) { 8890 case 0: 8891 case sizeof (uint8_t): 8892 case sizeof (uint16_t): 8893 case sizeof (uint32_t): 8894 case sizeof (uint64_t): 8895 break; 8896 8897 default: 8898 err += efunc(dp->dtdo_len - 1, "bad return size"); 8899 } 8900 } 8901 8902 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8903 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8904 dtrace_diftype_t *vt, *et; 8905 uint_t id, ndx; 8906 8907 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8908 v->dtdv_scope != DIFV_SCOPE_THREAD && 8909 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8910 err += efunc(i, "unrecognized variable scope %d\n", 8911 v->dtdv_scope); 8912 break; 8913 } 8914 8915 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8916 v->dtdv_kind != DIFV_KIND_SCALAR) { 8917 err += efunc(i, "unrecognized variable type %d\n", 8918 v->dtdv_kind); 8919 break; 8920 } 8921 8922 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8923 err += efunc(i, "%d exceeds variable id limit\n", id); 8924 break; 8925 } 8926 8927 if (id < DIF_VAR_OTHER_UBASE) 8928 continue; 8929 8930 /* 8931 * For user-defined variables, we need to check that this 8932 * definition is identical to any previous definition that we 8933 * encountered. 
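 * For example, if one clause declares a global variable as a scalar
 * and a later clause refers to the same variable id as a by-ref array,
 * the second DIFO must fail validation here rather than silently
 * reinterpreting the variable's storage.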
8934 */
8935 ndx = id - DIF_VAR_OTHER_UBASE;
8936
8937 switch (v->dtdv_scope) {
8938 case DIFV_SCOPE_GLOBAL:
8939 if (ndx < vstate->dtvs_nglobals) {
8940 dtrace_statvar_t *svar;
8941
8942 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8943 existing = &svar->dtsv_var;
8944 }
8945
8946 break;
8947
8948 case DIFV_SCOPE_THREAD:
8949 if (ndx < vstate->dtvs_ntlocals)
8950 existing = &vstate->dtvs_tlocals[ndx];
8951 break;
8952
8953 case DIFV_SCOPE_LOCAL:
8954 if (ndx < vstate->dtvs_nlocals) {
8955 dtrace_statvar_t *svar;
8956
8957 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8958 existing = &svar->dtsv_var;
8959 }
8960
8961 break;
8962 }
8963
8964 vt = &v->dtdv_type;
8965
8966 if (vt->dtdt_flags & DIF_TF_BYREF) {
8967 if (vt->dtdt_size == 0) {
8968 err += efunc(i, "zero-sized variable\n");
8969 break;
8970 }
8971
8972 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8973 vt->dtdt_size > dtrace_global_maxsize) {
8974 err += efunc(i, "oversized by-ref global\n");
8975 break;
8976 }
8977 }
8978
8979 if (existing == NULL || existing->dtdv_id == 0)
8980 continue;
8981
8982 ASSERT(existing->dtdv_id == v->dtdv_id);
8983 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8984
8985 if (existing->dtdv_kind != v->dtdv_kind)
8986 err += efunc(i, "%d changed variable kind\n", id);
8987
8988 et = &existing->dtdv_type;
8989
8990 if (vt->dtdt_flags != et->dtdt_flags) {
8991 err += efunc(i, "%d changed variable type flags\n", id);
8992 break;
8993 }
8994
8995 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8996 err += efunc(i, "%d changed variable type size\n", id);
8997 break;
8998 }
8999 }
9000
9001 return (err);
9002 }
9003
9004 /*
9005 * Validate a DTrace DIF object that is to be used as a helper. Helpers
9006 * are much more constrained than normal DIFOs. Specifically, they may
9007 * not:
9008 *
9009 * 1. Make calls to subroutines other than copyin(), copyinstr() or
9010 * miscellaneous string routines.
9011 * 2. Access DTrace variables other than the args[] array and the
9012 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9013 * 3. Have thread-local variables.
9014 * 4. Have dynamic variables.
9015 */
9016 static int
9017 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9018 {
9019 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 9020 int err = 0; 9021 uint_t pc; 9022 9023 for (pc = 0; pc < dp->dtdo_len; pc++) { 9024 dif_instr_t instr = dp->dtdo_buf[pc]; 9025 9026 uint_t v = DIF_INSTR_VAR(instr); 9027 uint_t subr = DIF_INSTR_SUBR(instr); 9028 uint_t op = DIF_INSTR_OP(instr); 9029 9030 switch (op) { 9031 case DIF_OP_OR: 9032 case DIF_OP_XOR: 9033 case DIF_OP_AND: 9034 case DIF_OP_SLL: 9035 case DIF_OP_SRL: 9036 case DIF_OP_SRA: 9037 case DIF_OP_SUB: 9038 case DIF_OP_ADD: 9039 case DIF_OP_MUL: 9040 case DIF_OP_SDIV: 9041 case DIF_OP_UDIV: 9042 case DIF_OP_SREM: 9043 case DIF_OP_UREM: 9044 case DIF_OP_COPYS: 9045 case DIF_OP_NOT: 9046 case DIF_OP_MOV: 9047 case DIF_OP_RLDSB: 9048 case DIF_OP_RLDSH: 9049 case DIF_OP_RLDSW: 9050 case DIF_OP_RLDUB: 9051 case DIF_OP_RLDUH: 9052 case DIF_OP_RLDUW: 9053 case DIF_OP_RLDX: 9054 case DIF_OP_ULDSB: 9055 case DIF_OP_ULDSH: 9056 case DIF_OP_ULDSW: 9057 case DIF_OP_ULDUB: 9058 case DIF_OP_ULDUH: 9059 case DIF_OP_ULDUW: 9060 case DIF_OP_ULDX: 9061 case DIF_OP_STB: 9062 case DIF_OP_STH: 9063 case DIF_OP_STW: 9064 case DIF_OP_STX: 9065 case DIF_OP_ALLOCS: 9066 case DIF_OP_CMP: 9067 case DIF_OP_SCMP: 9068 case DIF_OP_TST: 9069 case DIF_OP_BA: 9070 case DIF_OP_BE: 9071 case DIF_OP_BNE: 9072 case DIF_OP_BG: 9073 case DIF_OP_BGU: 9074 case DIF_OP_BGE: 9075 case DIF_OP_BGEU: 9076 case DIF_OP_BL: 9077 case DIF_OP_BLU: 9078 case DIF_OP_BLE: 9079 case DIF_OP_BLEU: 9080 case DIF_OP_RET: 9081 case DIF_OP_NOP: 9082 case DIF_OP_POPTS: 9083 case DIF_OP_FLUSHTS: 9084 case DIF_OP_SETX: 9085 case DIF_OP_SETS: 9086 case DIF_OP_LDGA: 9087 case DIF_OP_LDLS: 9088 case DIF_OP_STGS: 9089 case DIF_OP_STLS: 9090 case DIF_OP_PUSHTR: 9091 case DIF_OP_PUSHTV: 9092 break; 9093 9094 case DIF_OP_LDGS: 9095 if (v >= DIF_VAR_OTHER_UBASE) 9096 break; 9097 9098 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 9099 break; 9100 9101 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 9102 v == DIF_VAR_PPID || v == DIF_VAR_TID || 9103 v == DIF_VAR_EXECARGS || 9104 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9105 v == DIF_VAR_UID || v == DIF_VAR_GID) 9106 break; 9107 9108 err += efunc(pc, "illegal variable %u\n", v); 9109 break; 9110 9111 case DIF_OP_LDTA: 9112 case DIF_OP_LDTS: 9113 case DIF_OP_LDGAA: 9114 case DIF_OP_LDTAA: 9115 err += efunc(pc, "illegal dynamic variable load\n"); 9116 break; 9117 9118 case DIF_OP_STTS: 9119 case DIF_OP_STGAA: 9120 case DIF_OP_STTAA: 9121 err += efunc(pc, "illegal dynamic variable store\n"); 9122 break; 9123 9124 case DIF_OP_CALL: 9125 if (subr == DIF_SUBR_ALLOCA || 9126 subr == DIF_SUBR_BCOPY || 9127 subr == DIF_SUBR_COPYIN || 9128 subr == DIF_SUBR_COPYINTO || 9129 subr == DIF_SUBR_COPYINSTR || 9130 subr == DIF_SUBR_INDEX || 9131 subr == DIF_SUBR_INET_NTOA || 9132 subr == DIF_SUBR_INET_NTOA6 || 9133 subr == DIF_SUBR_INET_NTOP || 9134 subr == DIF_SUBR_LLTOSTR || 9135 subr == DIF_SUBR_RINDEX || 9136 subr == DIF_SUBR_STRCHR || 9137 subr == DIF_SUBR_STRJOIN || 9138 subr == DIF_SUBR_STRRCHR || 9139 subr == DIF_SUBR_STRSTR || 9140 subr == DIF_SUBR_HTONS || 9141 subr == DIF_SUBR_HTONL || 9142 subr == DIF_SUBR_HTONLL || 9143 subr == DIF_SUBR_NTOHS || 9144 subr == DIF_SUBR_NTOHL || 9145 subr == DIF_SUBR_NTOHLL || 9146 subr == DIF_SUBR_MEMREF || 9147 #if !defined(sun) 9148 subr == DIF_SUBR_MEMSTR || 9149 #endif 9150 subr == DIF_SUBR_TYPEREF) 9151 break; 9152 9153 err += efunc(pc, "invalid subr %u\n", subr); 9154 break; 9155 9156 default: 9157 err += efunc(pc, "invalid opcode %u\n", 9158 DIF_INSTR_OP(instr)); 9159 } 9160 } 9161 9162 return (err); 9163 } 9164 9165 /* 9166 * 
Returns 1 if the expression in the DIF object can be cached on a per-thread 9167 * basis; 0 if not. 9168 */ 9169 static int 9170 dtrace_difo_cacheable(dtrace_difo_t *dp) 9171 { 9172 int i; 9173 9174 if (dp == NULL) 9175 return (0); 9176 9177 for (i = 0; i < dp->dtdo_varlen; i++) { 9178 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9179 9180 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9181 continue; 9182 9183 switch (v->dtdv_id) { 9184 case DIF_VAR_CURTHREAD: 9185 case DIF_VAR_PID: 9186 case DIF_VAR_TID: 9187 case DIF_VAR_EXECARGS: 9188 case DIF_VAR_EXECNAME: 9189 case DIF_VAR_ZONENAME: 9190 break; 9191 9192 default: 9193 return (0); 9194 } 9195 } 9196 9197 /* 9198 * This DIF object may be cacheable. Now we need to look for any 9199 * array loading instructions, any memory loading instructions, or 9200 * any stores to thread-local variables. 9201 */ 9202 for (i = 0; i < dp->dtdo_len; i++) { 9203 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9204 9205 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9206 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9207 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9208 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9209 return (0); 9210 } 9211 9212 return (1); 9213 } 9214 9215 static void 9216 dtrace_difo_hold(dtrace_difo_t *dp) 9217 { 9218 int i; 9219 9220 ASSERT(MUTEX_HELD(&dtrace_lock)); 9221 9222 dp->dtdo_refcnt++; 9223 ASSERT(dp->dtdo_refcnt != 0); 9224 9225 /* 9226 * We need to check this DIF object for references to the variable 9227 * DIF_VAR_VTIMESTAMP. 9228 */ 9229 for (i = 0; i < dp->dtdo_varlen; i++) { 9230 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9231 9232 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9233 continue; 9234 9235 if (dtrace_vtime_references++ == 0) 9236 dtrace_vtime_enable(); 9237 } 9238 } 9239 9240 /* 9241 * This routine calculates the dynamic variable chunksize for a given DIF 9242 * object. The calculation is not fool-proof, and can probably be tricked by 9243 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9244 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9245 * if a dynamic variable size exceeds the chunksize. 
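 *
 * As a rough worked example (illustrative rather than normative): for
 * a store to an associative thread-local such as self->a[pid], the key
 * tuple is the user-specified key plus the implicit thread and
 * variable-id keys. The allocation is then sizeof (dtrace_dynvar_t)
 * (which embeds one dtrace_key_t), plus one dtrace_key_t per
 * additional key, plus each key's data rounded up to 8 bytes, plus the
 * size of the stored type -- with the total rounded up to an 8-byte
 * boundary. The largest such total seen in the DIFO becomes the
 * candidate chunksize.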
9246 */ 9247 static void 9248 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9249 { 9250 uint64_t sval = 0; 9251 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9252 const dif_instr_t *text = dp->dtdo_buf; 9253 uint_t pc, srd = 0; 9254 uint_t ttop = 0; 9255 size_t size, ksize; 9256 uint_t id, i; 9257 9258 for (pc = 0; pc < dp->dtdo_len; pc++) { 9259 dif_instr_t instr = text[pc]; 9260 uint_t op = DIF_INSTR_OP(instr); 9261 uint_t rd = DIF_INSTR_RD(instr); 9262 uint_t r1 = DIF_INSTR_R1(instr); 9263 uint_t nkeys = 0; 9264 uchar_t scope = 0; 9265 9266 dtrace_key_t *key = tupregs; 9267 9268 switch (op) { 9269 case DIF_OP_SETX: 9270 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9271 srd = rd; 9272 continue; 9273 9274 case DIF_OP_STTS: 9275 key = &tupregs[DIF_DTR_NREGS]; 9276 key[0].dttk_size = 0; 9277 key[1].dttk_size = 0; 9278 nkeys = 2; 9279 scope = DIFV_SCOPE_THREAD; 9280 break; 9281 9282 case DIF_OP_STGAA: 9283 case DIF_OP_STTAA: 9284 nkeys = ttop; 9285 9286 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9287 key[nkeys++].dttk_size = 0; 9288 9289 key[nkeys++].dttk_size = 0; 9290 9291 if (op == DIF_OP_STTAA) { 9292 scope = DIFV_SCOPE_THREAD; 9293 } else { 9294 scope = DIFV_SCOPE_GLOBAL; 9295 } 9296 9297 break; 9298 9299 case DIF_OP_PUSHTR: 9300 if (ttop == DIF_DTR_NREGS) 9301 return; 9302 9303 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9304 /* 9305 * If the register for the size of the "pushtr" 9306 * is %r0 (or the value is 0) and the type is 9307 * a string, we'll use the system-wide default 9308 * string size. 9309 */ 9310 tupregs[ttop++].dttk_size = 9311 dtrace_strsize_default; 9312 } else { 9313 if (srd == 0) 9314 return; 9315 9316 tupregs[ttop++].dttk_size = sval; 9317 } 9318 9319 break; 9320 9321 case DIF_OP_PUSHTV: 9322 if (ttop == DIF_DTR_NREGS) 9323 return; 9324 9325 tupregs[ttop++].dttk_size = 0; 9326 break; 9327 9328 case DIF_OP_FLUSHTS: 9329 ttop = 0; 9330 break; 9331 9332 case DIF_OP_POPTS: 9333 if (ttop != 0) 9334 ttop--; 9335 break; 9336 } 9337 9338 sval = 0; 9339 srd = 0; 9340 9341 if (nkeys == 0) 9342 continue; 9343 9344 /* 9345 * We have a dynamic variable allocation; calculate its size. 9346 */ 9347 for (ksize = 0, i = 0; i < nkeys; i++) 9348 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9349 9350 size = sizeof (dtrace_dynvar_t); 9351 size += sizeof (dtrace_key_t) * (nkeys - 1); 9352 size += ksize; 9353 9354 /* 9355 * Now we need to determine the size of the stored data. 9356 */ 9357 id = DIF_INSTR_VAR(instr); 9358 9359 for (i = 0; i < dp->dtdo_varlen; i++) { 9360 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9361 9362 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9363 size += v->dtdv_type.dtdt_size; 9364 break; 9365 } 9366 } 9367 9368 if (i == dp->dtdo_varlen) 9369 return; 9370 9371 /* 9372 * We have the size. If this is larger than the chunk size 9373 * for our dynamic variable state, reset the chunk size. 
9374 */ 9375 size = P2ROUNDUP(size, sizeof (uint64_t)); 9376 9377 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9378 vstate->dtvs_dynvars.dtds_chunksize = size; 9379 } 9380 } 9381 9382 static void 9383 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9384 { 9385 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9386 uint_t id; 9387 9388 ASSERT(MUTEX_HELD(&dtrace_lock)); 9389 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9390 9391 for (i = 0; i < dp->dtdo_varlen; i++) { 9392 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9393 dtrace_statvar_t *svar, ***svarp = NULL; 9394 size_t dsize = 0; 9395 uint8_t scope = v->dtdv_scope; 9396 int *np = NULL; 9397 9398 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9399 continue; 9400 9401 id -= DIF_VAR_OTHER_UBASE; 9402 9403 switch (scope) { 9404 case DIFV_SCOPE_THREAD: 9405 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9406 dtrace_difv_t *tlocals; 9407 9408 if ((ntlocals = (otlocals << 1)) == 0) 9409 ntlocals = 1; 9410 9411 osz = otlocals * sizeof (dtrace_difv_t); 9412 nsz = ntlocals * sizeof (dtrace_difv_t); 9413 9414 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9415 9416 if (osz != 0) { 9417 bcopy(vstate->dtvs_tlocals, 9418 tlocals, osz); 9419 kmem_free(vstate->dtvs_tlocals, osz); 9420 } 9421 9422 vstate->dtvs_tlocals = tlocals; 9423 vstate->dtvs_ntlocals = ntlocals; 9424 } 9425 9426 vstate->dtvs_tlocals[id] = *v; 9427 continue; 9428 9429 case DIFV_SCOPE_LOCAL: 9430 np = &vstate->dtvs_nlocals; 9431 svarp = &vstate->dtvs_locals; 9432 9433 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9434 dsize = NCPU * (v->dtdv_type.dtdt_size + 9435 sizeof (uint64_t)); 9436 else 9437 dsize = NCPU * sizeof (uint64_t); 9438 9439 break; 9440 9441 case DIFV_SCOPE_GLOBAL: 9442 np = &vstate->dtvs_nglobals; 9443 svarp = &vstate->dtvs_globals; 9444 9445 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9446 dsize = v->dtdv_type.dtdt_size + 9447 sizeof (uint64_t); 9448 9449 break; 9450 9451 default: 9452 ASSERT(0); 9453 } 9454 9455 while (id >= (oldsvars = *np)) { 9456 dtrace_statvar_t **statics; 9457 int newsvars, oldsize, newsize; 9458 9459 if ((newsvars = (oldsvars << 1)) == 0) 9460 newsvars = 1; 9461 9462 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9463 newsize = newsvars * sizeof (dtrace_statvar_t *); 9464 9465 statics = kmem_zalloc(newsize, KM_SLEEP); 9466 9467 if (oldsize != 0) { 9468 bcopy(*svarp, statics, oldsize); 9469 kmem_free(*svarp, oldsize); 9470 } 9471 9472 *svarp = statics; 9473 *np = newsvars; 9474 } 9475 9476 if ((svar = (*svarp)[id]) == NULL) { 9477 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9478 svar->dtsv_var = *v; 9479 9480 if ((svar->dtsv_size = dsize) != 0) { 9481 svar->dtsv_data = (uint64_t)(uintptr_t) 9482 kmem_zalloc(dsize, KM_SLEEP); 9483 } 9484 9485 (*svarp)[id] = svar; 9486 } 9487 9488 svar->dtsv_refcnt++; 9489 } 9490 9491 dtrace_difo_chunksize(dp, vstate); 9492 dtrace_difo_hold(dp); 9493 } 9494 9495 static dtrace_difo_t * 9496 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9497 { 9498 dtrace_difo_t *new; 9499 size_t sz; 9500 9501 ASSERT(dp->dtdo_buf != NULL); 9502 ASSERT(dp->dtdo_refcnt != 0); 9503 9504 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9505 9506 ASSERT(dp->dtdo_buf != NULL); 9507 sz = dp->dtdo_len * sizeof (dif_instr_t); 9508 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9509 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9510 new->dtdo_len = dp->dtdo_len; 9511 9512 if (dp->dtdo_strtab != NULL) { 9513 ASSERT(dp->dtdo_strlen != 0); 9514 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9515 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9516 new->dtdo_strlen = dp->dtdo_strlen; 9517 } 9518 9519 if (dp->dtdo_inttab != NULL) { 9520 ASSERT(dp->dtdo_intlen != 0); 9521 sz = dp->dtdo_intlen * sizeof (uint64_t); 9522 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9523 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9524 new->dtdo_intlen = dp->dtdo_intlen; 9525 } 9526 9527 if (dp->dtdo_vartab != NULL) { 9528 ASSERT(dp->dtdo_varlen != 0); 9529 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9530 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9531 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9532 new->dtdo_varlen = dp->dtdo_varlen; 9533 } 9534 9535 dtrace_difo_init(new, vstate); 9536 return (new); 9537 } 9538 9539 static void 9540 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9541 { 9542 int i; 9543 9544 ASSERT(dp->dtdo_refcnt == 0); 9545 9546 for (i = 0; i < dp->dtdo_varlen; i++) { 9547 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9548 dtrace_statvar_t *svar, **svarp = NULL; 9549 uint_t id; 9550 uint8_t scope = v->dtdv_scope; 9551 int *np = NULL; 9552 9553 switch (scope) { 9554 case DIFV_SCOPE_THREAD: 9555 continue; 9556 9557 case DIFV_SCOPE_LOCAL: 9558 np = &vstate->dtvs_nlocals; 9559 svarp = vstate->dtvs_locals; 9560 break; 9561 9562 case DIFV_SCOPE_GLOBAL: 9563 np = &vstate->dtvs_nglobals; 9564 svarp = vstate->dtvs_globals; 9565 break; 9566 9567 default: 9568 ASSERT(0); 9569 } 9570 9571 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9572 continue; 9573 9574 id -= DIF_VAR_OTHER_UBASE; 9575 ASSERT(id < *np); 9576 9577 svar = svarp[id]; 9578 ASSERT(svar != NULL); 9579 ASSERT(svar->dtsv_refcnt > 0); 9580 9581 if (--svar->dtsv_refcnt > 0) 9582 continue; 9583 9584 if (svar->dtsv_size != 0) { 9585 ASSERT(svar->dtsv_data != 0); 9586 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9587 svar->dtsv_size); 9588 } 9589 9590 kmem_free(svar, sizeof (dtrace_statvar_t)); 9591 svarp[id] = NULL; 9592 } 9593 9594 if (dp->dtdo_buf != NULL) 9595 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9596 if (dp->dtdo_inttab != NULL) 9597 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9598 if (dp->dtdo_strtab != NULL) 9599 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9600 if (dp->dtdo_vartab != NULL) 9601 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9602 9603 kmem_free(dp, sizeof (dtrace_difo_t)); 9604 } 9605 9606 static void 9607 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9608 { 9609 int i; 9610 9611 ASSERT(MUTEX_HELD(&dtrace_lock)); 9612 ASSERT(dp->dtdo_refcnt != 0); 9613 9614 for (i = 0; i < dp->dtdo_varlen; i++) { 9615 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9616 9617 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9618 continue; 9619 9620 ASSERT(dtrace_vtime_references > 0); 9621 if (--dtrace_vtime_references == 0) 9622 dtrace_vtime_disable(); 9623 } 9624 9625 if (--dp->dtdo_refcnt == 0) 9626 dtrace_difo_destroy(dp, vstate); 9627 } 9628 9629 /* 9630 * DTrace Format Functions 9631 */ 9632 static uint16_t 9633 dtrace_format_add(dtrace_state_t *state, char *str) 9634 { 9635 char *fmt, **new; 9636 uint16_t ndx, len = strlen(str) + 1; 9637 9638 fmt = kmem_zalloc(len, KM_SLEEP); 9639 bcopy(str, fmt, len); 9640 9641 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9642 if (state->dts_formats[ndx] == NULL) { 9643 state->dts_formats[ndx] = fmt; 9644 return (ndx + 1); 9645 } 9646 } 9647 9648 if (state->dts_nformats == USHRT_MAX) { 9649 /* 9650 * This is only likely if a denial-of-service attack is being 9651 * attempted. 
As such, it's okay to fail silently here. 9652 */ 9653 kmem_free(fmt, len); 9654 return (0); 9655 } 9656 9657 /* 9658 * For simplicity, we always resize the formats array to be exactly the 9659 * number of formats. 9660 */ 9661 ndx = state->dts_nformats++; 9662 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9663 9664 if (state->dts_formats != NULL) { 9665 ASSERT(ndx != 0); 9666 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9667 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9668 } 9669 9670 state->dts_formats = new; 9671 state->dts_formats[ndx] = fmt; 9672 9673 return (ndx + 1); 9674 } 9675 9676 static void 9677 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9678 { 9679 char *fmt; 9680 9681 ASSERT(state->dts_formats != NULL); 9682 ASSERT(format <= state->dts_nformats); 9683 ASSERT(state->dts_formats[format - 1] != NULL); 9684 9685 fmt = state->dts_formats[format - 1]; 9686 kmem_free(fmt, strlen(fmt) + 1); 9687 state->dts_formats[format - 1] = NULL; 9688 } 9689 9690 static void 9691 dtrace_format_destroy(dtrace_state_t *state) 9692 { 9693 int i; 9694 9695 if (state->dts_nformats == 0) { 9696 ASSERT(state->dts_formats == NULL); 9697 return; 9698 } 9699 9700 ASSERT(state->dts_formats != NULL); 9701 9702 for (i = 0; i < state->dts_nformats; i++) { 9703 char *fmt = state->dts_formats[i]; 9704 9705 if (fmt == NULL) 9706 continue; 9707 9708 kmem_free(fmt, strlen(fmt) + 1); 9709 } 9710 9711 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9712 state->dts_nformats = 0; 9713 state->dts_formats = NULL; 9714 } 9715 9716 /* 9717 * DTrace Predicate Functions 9718 */ 9719 static dtrace_predicate_t * 9720 dtrace_predicate_create(dtrace_difo_t *dp) 9721 { 9722 dtrace_predicate_t *pred; 9723 9724 ASSERT(MUTEX_HELD(&dtrace_lock)); 9725 ASSERT(dp->dtdo_refcnt != 0); 9726 9727 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9728 pred->dtp_difo = dp; 9729 pred->dtp_refcnt = 1; 9730 9731 if (!dtrace_difo_cacheable(dp)) 9732 return (pred); 9733 9734 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9735 /* 9736 * This is only theoretically possible -- we have had 2^32 9737 * cacheable predicates on this machine. We cannot allow any 9738 * more predicates to become cacheable: as unlikely as it is, 9739 * there may be a thread caching a (now stale) predicate cache 9740 * ID. 
(N.B.: the temptation is being successfully resisted to 9741 * have this cmn_err() "Holy shit -- we executed this code!") 9742 */ 9743 return (pred); 9744 } 9745 9746 pred->dtp_cacheid = dtrace_predcache_id++; 9747 9748 return (pred); 9749 } 9750 9751 static void 9752 dtrace_predicate_hold(dtrace_predicate_t *pred) 9753 { 9754 ASSERT(MUTEX_HELD(&dtrace_lock)); 9755 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9756 ASSERT(pred->dtp_refcnt > 0); 9757 9758 pred->dtp_refcnt++; 9759 } 9760 9761 static void 9762 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9763 { 9764 dtrace_difo_t *dp = pred->dtp_difo; 9765 9766 ASSERT(MUTEX_HELD(&dtrace_lock)); 9767 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9768 ASSERT(pred->dtp_refcnt > 0); 9769 9770 if (--pred->dtp_refcnt == 0) { 9771 dtrace_difo_release(pred->dtp_difo, vstate); 9772 kmem_free(pred, sizeof (dtrace_predicate_t)); 9773 } 9774 } 9775 9776 /* 9777 * DTrace Action Description Functions 9778 */ 9779 static dtrace_actdesc_t * 9780 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9781 uint64_t uarg, uint64_t arg) 9782 { 9783 dtrace_actdesc_t *act; 9784 9785 #if defined(sun) 9786 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9787 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9788 #endif 9789 9790 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9791 act->dtad_kind = kind; 9792 act->dtad_ntuple = ntuple; 9793 act->dtad_uarg = uarg; 9794 act->dtad_arg = arg; 9795 act->dtad_refcnt = 1; 9796 9797 return (act); 9798 } 9799 9800 static void 9801 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9802 { 9803 ASSERT(act->dtad_refcnt >= 1); 9804 act->dtad_refcnt++; 9805 } 9806 9807 static void 9808 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9809 { 9810 dtrace_actkind_t kind = act->dtad_kind; 9811 dtrace_difo_t *dp; 9812 9813 ASSERT(act->dtad_refcnt >= 1); 9814 9815 if (--act->dtad_refcnt != 0) 9816 return; 9817 9818 if ((dp = act->dtad_difo) != NULL) 9819 dtrace_difo_release(dp, vstate); 9820 9821 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9822 char *str = (char *)(uintptr_t)act->dtad_arg; 9823 9824 #if defined(sun) 9825 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9826 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9827 #endif 9828 9829 if (str != NULL) 9830 kmem_free(str, strlen(str) + 1); 9831 } 9832 9833 kmem_free(act, sizeof (dtrace_actdesc_t)); 9834 } 9835 9836 /* 9837 * DTrace ECB Functions 9838 */ 9839 static dtrace_ecb_t * 9840 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9841 { 9842 dtrace_ecb_t *ecb; 9843 dtrace_epid_t epid; 9844 9845 ASSERT(MUTEX_HELD(&dtrace_lock)); 9846 9847 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9848 ecb->dte_predicate = NULL; 9849 ecb->dte_probe = probe; 9850 9851 /* 9852 * The default size is the size of the default action: recording 9853 * the header. 
9854 */ 9855 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 9856 ecb->dte_alignment = sizeof (dtrace_epid_t); 9857 9858 epid = state->dts_epid++; 9859 9860 if (epid - 1 >= state->dts_necbs) { 9861 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9862 int necbs = state->dts_necbs << 1; 9863 9864 ASSERT(epid == state->dts_necbs + 1); 9865 9866 if (necbs == 0) { 9867 ASSERT(oecbs == NULL); 9868 necbs = 1; 9869 } 9870 9871 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9872 9873 if (oecbs != NULL) 9874 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9875 9876 dtrace_membar_producer(); 9877 state->dts_ecbs = ecbs; 9878 9879 if (oecbs != NULL) { 9880 /* 9881 * If this state is active, we must dtrace_sync() 9882 * before we can free the old dts_ecbs array: we're 9883 * coming in hot, and there may be active ring 9884 * buffer processing (which indexes into the dts_ecbs 9885 * array) on another CPU. 9886 */ 9887 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9888 dtrace_sync(); 9889 9890 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9891 } 9892 9893 dtrace_membar_producer(); 9894 state->dts_necbs = necbs; 9895 } 9896 9897 ecb->dte_state = state; 9898 9899 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9900 dtrace_membar_producer(); 9901 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9902 9903 return (ecb); 9904 } 9905 9906 static void 9907 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9908 { 9909 dtrace_probe_t *probe = ecb->dte_probe; 9910 9911 ASSERT(MUTEX_HELD(&cpu_lock)); 9912 ASSERT(MUTEX_HELD(&dtrace_lock)); 9913 ASSERT(ecb->dte_next == NULL); 9914 9915 if (probe == NULL) { 9916 /* 9917 * This is the NULL probe -- there's nothing to do. 9918 */ 9919 return; 9920 } 9921 9922 if (probe->dtpr_ecb == NULL) { 9923 dtrace_provider_t *prov = probe->dtpr_provider; 9924 9925 /* 9926 * We're the first ECB on this probe. 9927 */ 9928 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9929 9930 if (ecb->dte_predicate != NULL) 9931 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9932 9933 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9934 probe->dtpr_id, probe->dtpr_arg); 9935 } else { 9936 /* 9937 * This probe is already active. Swing the last pointer to 9938 * point to the new ECB, and issue a dtrace_sync() to assure 9939 * that all CPUs have seen the change. 9940 */ 9941 ASSERT(probe->dtpr_ecb_last != NULL); 9942 probe->dtpr_ecb_last->dte_next = ecb; 9943 probe->dtpr_ecb_last = ecb; 9944 probe->dtpr_predcache = 0; 9945 9946 dtrace_sync(); 9947 } 9948 } 9949 9950 static void 9951 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9952 { 9953 dtrace_action_t *act; 9954 uint32_t curneeded = UINT32_MAX; 9955 uint32_t aggbase = UINT32_MAX; 9956 9957 /* 9958 * If we record anything, we always record the dtrace_rechdr_t. (And 9959 * we always record it first.) 
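 * (The dtrace_rechdr_t carries the EPID along with a record timestamp
 * that consumers use to merge per-CPU buffers into a single
 * time-ordered stream.)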
9960 */ 9961 ecb->dte_size = sizeof (dtrace_rechdr_t); 9962 ecb->dte_alignment = sizeof (dtrace_epid_t); 9963 9964 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9965 dtrace_recdesc_t *rec = &act->dta_rec; 9966 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 9967 9968 ecb->dte_alignment = MAX(ecb->dte_alignment, 9969 rec->dtrd_alignment); 9970 9971 if (DTRACEACT_ISAGG(act->dta_kind)) { 9972 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9973 9974 ASSERT(rec->dtrd_size != 0); 9975 ASSERT(agg->dtag_first != NULL); 9976 ASSERT(act->dta_prev->dta_intuple); 9977 ASSERT(aggbase != UINT32_MAX); 9978 ASSERT(curneeded != UINT32_MAX); 9979 9980 agg->dtag_base = aggbase; 9981 9982 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9983 rec->dtrd_offset = curneeded; 9984 curneeded += rec->dtrd_size; 9985 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 9986 9987 aggbase = UINT32_MAX; 9988 curneeded = UINT32_MAX; 9989 } else if (act->dta_intuple) { 9990 if (curneeded == UINT32_MAX) { 9991 /* 9992 * This is the first record in a tuple. Align 9993 * curneeded to be at offset 4 in an 8-byte 9994 * aligned block. 9995 */ 9996 ASSERT(act->dta_prev == NULL || 9997 !act->dta_prev->dta_intuple); 9998 ASSERT3U(aggbase, ==, UINT32_MAX); 9999 curneeded = P2PHASEUP(ecb->dte_size, 10000 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 10001 10002 aggbase = curneeded - sizeof (dtrace_aggid_t); 10003 ASSERT(IS_P2ALIGNED(aggbase, 10004 sizeof (uint64_t))); 10005 } 10006 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 10007 rec->dtrd_offset = curneeded; 10008 curneeded += rec->dtrd_size; 10009 } else { 10010 /* tuples must be followed by an aggregation */ 10011 ASSERT(act->dta_prev == NULL || 10012 !act->dta_prev->dta_intuple); 10013 10014 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 10015 rec->dtrd_alignment); 10016 rec->dtrd_offset = ecb->dte_size; 10017 ecb->dte_size += rec->dtrd_size; 10018 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 10019 } 10020 } 10021 10022 if ((act = ecb->dte_action) != NULL && 10023 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 10024 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 10025 /* 10026 * If the size is still sizeof (dtrace_rechdr_t), then all 10027 * actions store no data; set the size to 0. 
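 * (This is the common case for purely aggregating enablings, whose
 * data lands in the aggregation buffer rather than the principal
 * buffer.)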
10028 */ 10029 ecb->dte_size = 0; 10030 } 10031 10032 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 10033 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 10034 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 10035 ecb->dte_needed); 10036 } 10037 10038 static dtrace_action_t * 10039 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10040 { 10041 dtrace_aggregation_t *agg; 10042 size_t size = sizeof (uint64_t); 10043 int ntuple = desc->dtad_ntuple; 10044 dtrace_action_t *act; 10045 dtrace_recdesc_t *frec; 10046 dtrace_aggid_t aggid; 10047 dtrace_state_t *state = ecb->dte_state; 10048 10049 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 10050 agg->dtag_ecb = ecb; 10051 10052 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 10053 10054 switch (desc->dtad_kind) { 10055 case DTRACEAGG_MIN: 10056 agg->dtag_initial = INT64_MAX; 10057 agg->dtag_aggregate = dtrace_aggregate_min; 10058 break; 10059 10060 case DTRACEAGG_MAX: 10061 agg->dtag_initial = INT64_MIN; 10062 agg->dtag_aggregate = dtrace_aggregate_max; 10063 break; 10064 10065 case DTRACEAGG_COUNT: 10066 agg->dtag_aggregate = dtrace_aggregate_count; 10067 break; 10068 10069 case DTRACEAGG_QUANTIZE: 10070 agg->dtag_aggregate = dtrace_aggregate_quantize; 10071 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 10072 sizeof (uint64_t); 10073 break; 10074 10075 case DTRACEAGG_LQUANTIZE: { 10076 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10077 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10078 10079 agg->dtag_initial = desc->dtad_arg; 10080 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10081 10082 if (step == 0 || levels == 0) 10083 goto err; 10084 10085 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10086 break; 10087 } 10088 10089 case DTRACEAGG_LLQUANTIZE: { 10090 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10091 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10092 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10093 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10094 int64_t v; 10095 10096 agg->dtag_initial = desc->dtad_arg; 10097 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10098 10099 if (factor < 2 || low >= high || nsteps < factor) 10100 goto err; 10101 10102 /* 10103 * Now check that the number of steps evenly divides a power 10104 * of the factor. (This assures both integer bucket size and 10105 * linearity within each magnitude.) 10106 */ 10107 for (v = factor; v < nsteps; v *= factor) 10108 continue; 10109 10110 if ((v % nsteps) || (nsteps % factor)) 10111 goto err; 10112 10113 size = (dtrace_aggregate_llquantize_bucket(factor, 10114 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10115 break; 10116 } 10117 10118 case DTRACEAGG_AVG: 10119 agg->dtag_aggregate = dtrace_aggregate_avg; 10120 size = sizeof (uint64_t) * 2; 10121 break; 10122 10123 case DTRACEAGG_STDDEV: 10124 agg->dtag_aggregate = dtrace_aggregate_stddev; 10125 size = sizeof (uint64_t) * 4; 10126 break; 10127 10128 case DTRACEAGG_SUM: 10129 agg->dtag_aggregate = dtrace_aggregate_sum; 10130 break; 10131 10132 default: 10133 goto err; 10134 } 10135 10136 agg->dtag_action.dta_rec.dtrd_size = size; 10137 10138 if (ntuple == 0) 10139 goto err; 10140 10141 /* 10142 * We must make sure that we have enough actions for the n-tuple. 
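 * As an illustrative example: an aggregation such as
 * @a[pid, execname] = count() arrives here with dtad_ntuple == 2, and
 * the two key expressions are the actions immediately preceding the
 * aggregating action; the backward walk below locates the first of
 * them.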
10143 */ 10144 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10145 if (DTRACEACT_ISAGG(act->dta_kind)) 10146 break; 10147 10148 if (--ntuple == 0) { 10149 /* 10150 * This is the action with which our n-tuple begins. 10151 */ 10152 agg->dtag_first = act; 10153 goto success; 10154 } 10155 } 10156 10157 /* 10158 * This n-tuple is short by ntuple elements. Return failure. 10159 */ 10160 ASSERT(ntuple != 0); 10161 err: 10162 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10163 return (NULL); 10164 10165 success: 10166 /* 10167 * If the last action in the tuple has a size of zero, it's actually 10168 * an expression argument for the aggregating action. 10169 */ 10170 ASSERT(ecb->dte_action_last != NULL); 10171 act = ecb->dte_action_last; 10172 10173 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10174 ASSERT(act->dta_difo != NULL); 10175 10176 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10177 agg->dtag_hasarg = 1; 10178 } 10179 10180 /* 10181 * We need to allocate an id for this aggregation. 10182 */ 10183 #if defined(sun) 10184 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10185 VM_BESTFIT | VM_SLEEP); 10186 #else 10187 aggid = alloc_unr(state->dts_aggid_arena); 10188 #endif 10189 10190 if (aggid - 1 >= state->dts_naggregations) { 10191 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10192 dtrace_aggregation_t **aggs; 10193 int naggs = state->dts_naggregations << 1; 10194 int onaggs = state->dts_naggregations; 10195 10196 ASSERT(aggid == state->dts_naggregations + 1); 10197 10198 if (naggs == 0) { 10199 ASSERT(oaggs == NULL); 10200 naggs = 1; 10201 } 10202 10203 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10204 10205 if (oaggs != NULL) { 10206 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10207 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10208 } 10209 10210 state->dts_aggregations = aggs; 10211 state->dts_naggregations = naggs; 10212 } 10213 10214 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10215 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10216 10217 frec = &agg->dtag_first->dta_rec; 10218 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10219 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10220 10221 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10222 ASSERT(!act->dta_intuple); 10223 act->dta_intuple = 1; 10224 } 10225 10226 return (&agg->dtag_action); 10227 } 10228 10229 static void 10230 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10231 { 10232 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10233 dtrace_state_t *state = ecb->dte_state; 10234 dtrace_aggid_t aggid = agg->dtag_id; 10235 10236 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10237 #if defined(sun) 10238 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10239 #else 10240 free_unr(state->dts_aggid_arena, aggid); 10241 #endif 10242 10243 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10244 state->dts_aggregations[aggid - 1] = NULL; 10245 10246 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10247 } 10248 10249 static int 10250 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10251 { 10252 dtrace_action_t *action, *last; 10253 dtrace_difo_t *dp = desc->dtad_difo; 10254 uint32_t size = 0, align = sizeof (uint8_t), mask; 10255 uint16_t format = 0; 10256 dtrace_recdesc_t *rec; 10257 dtrace_state_t *state = ecb->dte_state; 10258 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10259 uint64_t arg = desc->dtad_arg; 10260 10261 
ASSERT(MUTEX_HELD(&dtrace_lock)); 10262 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10263 10264 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10265 /* 10266 * If this is an aggregating action, there must be neither 10267 * a speculate nor a commit on the action chain. 10268 */ 10269 dtrace_action_t *act; 10270 10271 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10272 if (act->dta_kind == DTRACEACT_COMMIT) 10273 return (EINVAL); 10274 10275 if (act->dta_kind == DTRACEACT_SPECULATE) 10276 return (EINVAL); 10277 } 10278 10279 action = dtrace_ecb_aggregation_create(ecb, desc); 10280 10281 if (action == NULL) 10282 return (EINVAL); 10283 } else { 10284 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10285 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10286 dp != NULL && dp->dtdo_destructive)) { 10287 state->dts_destructive = 1; 10288 } 10289 10290 switch (desc->dtad_kind) { 10291 case DTRACEACT_PRINTF: 10292 case DTRACEACT_PRINTA: 10293 case DTRACEACT_SYSTEM: 10294 case DTRACEACT_FREOPEN: 10295 case DTRACEACT_DIFEXPR: 10296 /* 10297 * We know that our arg is a string -- turn it into a 10298 * format. 10299 */ 10300 if (arg == 0) { 10301 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10302 desc->dtad_kind == DTRACEACT_DIFEXPR); 10303 format = 0; 10304 } else { 10305 ASSERT(arg != 0); 10306 #if defined(sun) 10307 ASSERT(arg > KERNELBASE); 10308 #endif 10309 format = dtrace_format_add(state, 10310 (char *)(uintptr_t)arg); 10311 } 10312 10313 /*FALLTHROUGH*/ 10314 case DTRACEACT_LIBACT: 10315 case DTRACEACT_TRACEMEM: 10316 case DTRACEACT_TRACEMEM_DYNSIZE: 10317 if (dp == NULL) 10318 return (EINVAL); 10319 10320 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10321 break; 10322 10323 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10324 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10325 return (EINVAL); 10326 10327 size = opt[DTRACEOPT_STRSIZE]; 10328 } 10329 10330 break; 10331 10332 case DTRACEACT_STACK: 10333 if ((nframes = arg) == 0) { 10334 nframes = opt[DTRACEOPT_STACKFRAMES]; 10335 ASSERT(nframes > 0); 10336 arg = nframes; 10337 } 10338 10339 size = nframes * sizeof (pc_t); 10340 break; 10341 10342 case DTRACEACT_JSTACK: 10343 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10344 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10345 10346 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10347 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10348 10349 arg = DTRACE_USTACK_ARG(nframes, strsize); 10350 10351 /*FALLTHROUGH*/ 10352 case DTRACEACT_USTACK: 10353 if (desc->dtad_kind != DTRACEACT_JSTACK && 10354 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10355 strsize = DTRACE_USTACK_STRSIZE(arg); 10356 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10357 ASSERT(nframes > 0); 10358 arg = DTRACE_USTACK_ARG(nframes, strsize); 10359 } 10360 10361 /* 10362 * Save a slot for the pid. 
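* As a worked example (the figures are illustrative): with nframes
* of 100 and no string space, the computation below yields
* (100 + 1) * sizeof (uint64_t) = 808 bytes -- one 64-bit slot for
* the pid plus one per frame -- before being rounded up to
* pointer-size alignment.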
10363 */ 10364 size = (nframes + 1) * sizeof (uint64_t); 10365 size += DTRACE_USTACK_STRSIZE(arg); 10366 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10367 10368 break; 10369 10370 case DTRACEACT_SYM: 10371 case DTRACEACT_MOD: 10372 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10373 sizeof (uint64_t)) || 10374 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10375 return (EINVAL); 10376 break; 10377 10378 case DTRACEACT_USYM: 10379 case DTRACEACT_UMOD: 10380 case DTRACEACT_UADDR: 10381 if (dp == NULL || 10382 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10383 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10384 return (EINVAL); 10385 10386 /* 10387 * We have a slot for the pid, plus a slot for the 10388 * argument. To keep things simple (aligned with 10389 * bitness-neutral sizing), we store each as a 64-bit 10390 * quantity. 10391 */ 10392 size = 2 * sizeof (uint64_t); 10393 break; 10394 10395 case DTRACEACT_STOP: 10396 case DTRACEACT_BREAKPOINT: 10397 case DTRACEACT_PANIC: 10398 break; 10399 10400 case DTRACEACT_CHILL: 10401 case DTRACEACT_DISCARD: 10402 case DTRACEACT_RAISE: 10403 if (dp == NULL) 10404 return (EINVAL); 10405 break; 10406 10407 case DTRACEACT_EXIT: 10408 if (dp == NULL || 10409 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10410 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10411 return (EINVAL); 10412 break; 10413 10414 case DTRACEACT_SPECULATE: 10415 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 10416 return (EINVAL); 10417 10418 if (dp == NULL) 10419 return (EINVAL); 10420 10421 state->dts_speculates = 1; 10422 break; 10423 10424 case DTRACEACT_PRINTM: if (dp == NULL) return (EINVAL); 10425 size = dp->dtdo_rtype.dtdt_size; 10426 break; 10427 10428 case DTRACEACT_PRINTT: if (dp == NULL) return (EINVAL); 10429 size = dp->dtdo_rtype.dtdt_size; 10430 break; 10431 10432 case DTRACEACT_COMMIT: { 10433 dtrace_action_t *act = ecb->dte_action; 10434 10435 for (; act != NULL; act = act->dta_next) { 10436 if (act->dta_kind == DTRACEACT_COMMIT) 10437 return (EINVAL); 10438 } 10439 10440 if (dp == NULL) 10441 return (EINVAL); 10442 break; 10443 } 10444 10445 default: 10446 return (EINVAL); 10447 } 10448 10449 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10450 /* 10451 * If this is a data-storing action or a speculate, 10452 * we must be sure that there isn't a commit on the 10453 * action chain.
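* (Together with the DTRACEACT_COMMIT case above, which refuses a
* second commit, this makes commit() effectively the final
* data-relevant action in an ECB: a clause along the lines of
* "commit(spec); trace(x)" -- a hypothetical example, not taken
* from this file -- is rejected with EINVAL.)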
10454 */ 10455 dtrace_action_t *act = ecb->dte_action; 10456 10457 for (; act != NULL; act = act->dta_next) { 10458 if (act->dta_kind == DTRACEACT_COMMIT) 10459 return (EINVAL); 10460 } 10461 } 10462 10463 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10464 action->dta_rec.dtrd_size = size; 10465 } 10466 10467 action->dta_refcnt = 1; 10468 rec = &action->dta_rec; 10469 size = rec->dtrd_size; 10470 10471 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10472 if (!(size & mask)) { 10473 align = mask + 1; 10474 break; 10475 } 10476 } 10477 10478 action->dta_kind = desc->dtad_kind; 10479 10480 if ((action->dta_difo = dp) != NULL) 10481 dtrace_difo_hold(dp); 10482 10483 rec->dtrd_action = action->dta_kind; 10484 rec->dtrd_arg = arg; 10485 rec->dtrd_uarg = desc->dtad_uarg; 10486 rec->dtrd_alignment = (uint16_t)align; 10487 rec->dtrd_format = format; 10488 10489 if ((last = ecb->dte_action_last) != NULL) { 10490 ASSERT(ecb->dte_action != NULL); 10491 action->dta_prev = last; 10492 last->dta_next = action; 10493 } else { 10494 ASSERT(ecb->dte_action == NULL); 10495 ecb->dte_action = action; 10496 } 10497 10498 ecb->dte_action_last = action; 10499 10500 return (0); 10501 } 10502 10503 static void 10504 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10505 { 10506 dtrace_action_t *act = ecb->dte_action, *next; 10507 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10508 dtrace_difo_t *dp; 10509 uint16_t format; 10510 10511 if (act != NULL && act->dta_refcnt > 1) { 10512 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10513 act->dta_refcnt--; 10514 } else { 10515 for (; act != NULL; act = next) { 10516 next = act->dta_next; 10517 ASSERT(next != NULL || act == ecb->dte_action_last); 10518 ASSERT(act->dta_refcnt == 1); 10519 10520 if ((format = act->dta_rec.dtrd_format) != 0) 10521 dtrace_format_remove(ecb->dte_state, format); 10522 10523 if ((dp = act->dta_difo) != NULL) 10524 dtrace_difo_release(dp, vstate); 10525 10526 if (DTRACEACT_ISAGG(act->dta_kind)) { 10527 dtrace_ecb_aggregation_destroy(ecb, act); 10528 } else { 10529 kmem_free(act, sizeof (dtrace_action_t)); 10530 } 10531 } 10532 } 10533 10534 ecb->dte_action = NULL; 10535 ecb->dte_action_last = NULL; 10536 ecb->dte_size = 0; 10537 } 10538 10539 static void 10540 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10541 { 10542 /* 10543 * We disable the ECB by removing it from its probe. 10544 */ 10545 dtrace_ecb_t *pecb, *prev = NULL; 10546 dtrace_probe_t *probe = ecb->dte_probe; 10547 10548 ASSERT(MUTEX_HELD(&dtrace_lock)); 10549 10550 if (probe == NULL) { 10551 /* 10552 * This is the NULL probe; there is nothing to disable. 10553 */ 10554 return; 10555 } 10556 10557 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10558 if (pecb == ecb) 10559 break; 10560 prev = pecb; 10561 } 10562 10563 ASSERT(pecb != NULL); 10564 10565 if (prev == NULL) { 10566 probe->dtpr_ecb = ecb->dte_next; 10567 } else { 10568 prev->dte_next = ecb->dte_next; 10569 } 10570 10571 if (ecb == probe->dtpr_ecb_last) { 10572 ASSERT(ecb->dte_next == NULL); 10573 probe->dtpr_ecb_last = prev; 10574 } 10575 10576 /* 10577 * The ECB has been disconnected from the probe; now sync to assure 10578 * that all CPUs have seen the change before returning. 10579 */ 10580 dtrace_sync(); 10581 10582 if (probe->dtpr_ecb == NULL) { 10583 /* 10584 * That was the last ECB on the probe; clear the predicate 10585 * cache ID for the probe, disable it and sync one more time 10586 * to assure that we'll never hit it again. 
10587 */ 10588 dtrace_provider_t *prov = probe->dtpr_provider; 10589 10590 ASSERT(ecb->dte_next == NULL); 10591 ASSERT(probe->dtpr_ecb_last == NULL); 10592 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10593 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10594 probe->dtpr_id, probe->dtpr_arg); 10595 dtrace_sync(); 10596 } else { 10597 /* 10598 * There is at least one ECB remaining on the probe. If there 10599 * is _exactly_ one, set the probe's predicate cache ID to be 10600 * the predicate cache ID of the remaining ECB. 10601 */ 10602 ASSERT(probe->dtpr_ecb_last != NULL); 10603 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10604 10605 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10606 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10607 10608 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10609 10610 if (p != NULL) 10611 probe->dtpr_predcache = p->dtp_cacheid; 10612 } 10613 10614 ecb->dte_next = NULL; 10615 } 10616 } 10617 10618 static void 10619 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10620 { 10621 dtrace_state_t *state = ecb->dte_state; 10622 dtrace_vstate_t *vstate = &state->dts_vstate; 10623 dtrace_predicate_t *pred; 10624 dtrace_epid_t epid = ecb->dte_epid; 10625 10626 ASSERT(MUTEX_HELD(&dtrace_lock)); 10627 ASSERT(ecb->dte_next == NULL); 10628 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10629 10630 if ((pred = ecb->dte_predicate) != NULL) 10631 dtrace_predicate_release(pred, vstate); 10632 10633 dtrace_ecb_action_remove(ecb); 10634 10635 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10636 state->dts_ecbs[epid - 1] = NULL; 10637 10638 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10639 } 10640 10641 static dtrace_ecb_t * 10642 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10643 dtrace_enabling_t *enab) 10644 { 10645 dtrace_ecb_t *ecb; 10646 dtrace_predicate_t *pred; 10647 dtrace_actdesc_t *act; 10648 dtrace_provider_t *prov; 10649 dtrace_ecbdesc_t *desc = enab->dten_current; 10650 10651 ASSERT(MUTEX_HELD(&dtrace_lock)); 10652 ASSERT(state != NULL); 10653 10654 ecb = dtrace_ecb_add(state, probe); 10655 ecb->dte_uarg = desc->dted_uarg; 10656 10657 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10658 dtrace_predicate_hold(pred); 10659 ecb->dte_predicate = pred; 10660 } 10661 10662 if (probe != NULL) { 10663 /* 10664 * If the provider shows more leg than the consumer is old 10665 * enough to see, we need to enable the appropriate implicit 10666 * predicate bits to prevent the ecb from activating at 10667 * revealing times. 10668 * 10669 * Providers specifying DTRACE_PRIV_USER at register time 10670 * are stating that they need the /proc-style privilege 10671 * model to be enforced, and this is what DTRACE_COND_OWNER 10672 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10673 */ 10674 prov = probe->dtpr_provider; 10675 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10676 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10677 ecb->dte_cond |= DTRACE_COND_OWNER; 10678 10679 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10680 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10681 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10682 10683 /* 10684 * If the provider shows us kernel innards and the user 10685 * is lacking sufficient privilege, enable the 10686 * DTRACE_COND_USERMODE implicit predicate. 
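* (Roughly speaking: a consumer that may not observe kernel state
* can still enable such a probe, but its ECB will only fire for
* probe hits taken while the CPU was executing in user mode;
* kernel-mode firings are implicitly predicated away.)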
10687 */ 10688 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10689 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10690 ecb->dte_cond |= DTRACE_COND_USERMODE; 10691 } 10692 10693 if (dtrace_ecb_create_cache != NULL) { 10694 /* 10695 * If we have a cached ecb, we'll use its action list instead 10696 * of creating our own (saving both time and space). 10697 */ 10698 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10699 dtrace_action_t *act = cached->dte_action; 10700 10701 if (act != NULL) { 10702 ASSERT(act->dta_refcnt > 0); 10703 act->dta_refcnt++; 10704 ecb->dte_action = act; 10705 ecb->dte_action_last = cached->dte_action_last; 10706 ecb->dte_needed = cached->dte_needed; 10707 ecb->dte_size = cached->dte_size; 10708 ecb->dte_alignment = cached->dte_alignment; 10709 } 10710 10711 return (ecb); 10712 } 10713 10714 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10715 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10716 dtrace_ecb_destroy(ecb); 10717 return (NULL); 10718 } 10719 } 10720 10721 dtrace_ecb_resize(ecb); 10722 10723 return (dtrace_ecb_create_cache = ecb); 10724 } 10725 10726 static int 10727 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10728 { 10729 dtrace_ecb_t *ecb; 10730 dtrace_enabling_t *enab = arg; 10731 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10732 10733 ASSERT(state != NULL); 10734 10735 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10736 /* 10737 * This probe was created in a generation for which this 10738 * enabling has previously created ECBs; we don't want to 10739 * enable it again, so just kick out. 10740 */ 10741 return (DTRACE_MATCH_NEXT); 10742 } 10743 10744 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10745 return (DTRACE_MATCH_DONE); 10746 10747 dtrace_ecb_enable(ecb); 10748 return (DTRACE_MATCH_NEXT); 10749 } 10750 10751 static dtrace_ecb_t * 10752 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10753 { 10754 dtrace_ecb_t *ecb; 10755 10756 ASSERT(MUTEX_HELD(&dtrace_lock)); 10757 10758 if (id == 0 || id > state->dts_necbs) 10759 return (NULL); 10760 10761 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10762 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10763 10764 return (state->dts_ecbs[id - 1]); 10765 } 10766 10767 static dtrace_aggregation_t * 10768 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10769 { 10770 dtrace_aggregation_t *agg; 10771 10772 ASSERT(MUTEX_HELD(&dtrace_lock)); 10773 10774 if (id == 0 || id > state->dts_naggregations) 10775 return (NULL); 10776 10777 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10778 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10779 agg->dtag_id == id); 10780 10781 return (state->dts_aggregations[id - 1]); 10782 } 10783 10784 /* 10785 * DTrace Buffer Functions 10786 * 10787 * The following functions manipulate DTrace buffers. Most of these functions 10788 * are called in the context of establishing or processing consumer state; 10789 * exceptions are explicitly noted. 10790 */ 10791 10792 /* 10793 * Note: called from cross call context. This function switches the two 10794 * buffers on a given CPU. The atomicity of this operation is assured by 10795 * disabling interrupts while the actual switch takes place; the disabling of 10796 * interrupts serializes the execution with any execution of dtrace_probe() on 10797 * the same CPU. 
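* (The active per-CPU buffer is dtb_tomax and its inactive twin is
* dtb_xamot -- apparently "tomax" reversed. The switch below simply
* exchanges the two pointers, retiring the offset, drop and error
* counts into the dtb_xamot_* fields for the consumer to read.)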
10798 */ 10799 static void 10800 dtrace_buffer_switch(dtrace_buffer_t *buf) 10801 { 10802 caddr_t tomax = buf->dtb_tomax; 10803 caddr_t xamot = buf->dtb_xamot; 10804 dtrace_icookie_t cookie; 10805 hrtime_t now; 10806 10807 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10808 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10809 10810 cookie = dtrace_interrupt_disable(); 10811 now = dtrace_gethrtime(); 10812 buf->dtb_tomax = xamot; 10813 buf->dtb_xamot = tomax; 10814 buf->dtb_xamot_drops = buf->dtb_drops; 10815 buf->dtb_xamot_offset = buf->dtb_offset; 10816 buf->dtb_xamot_errors = buf->dtb_errors; 10817 buf->dtb_xamot_flags = buf->dtb_flags; 10818 buf->dtb_offset = 0; 10819 buf->dtb_drops = 0; 10820 buf->dtb_errors = 0; 10821 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10822 buf->dtb_interval = now - buf->dtb_switched; 10823 buf->dtb_switched = now; 10824 dtrace_interrupt_enable(cookie); 10825 } 10826 10827 /* 10828 * Note: called from cross call context. This function activates a buffer 10829 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10830 * is guaranteed by the disabling of interrupts. 10831 */ 10832 static void 10833 dtrace_buffer_activate(dtrace_state_t *state) 10834 { 10835 dtrace_buffer_t *buf; 10836 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10837 10838 buf = &state->dts_buffer[curcpu]; 10839 10840 if (buf->dtb_tomax != NULL) { 10841 /* 10842 * We might like to assert that the buffer is marked inactive, 10843 * but this isn't necessarily true: the buffer for the CPU 10844 * that processes the BEGIN probe is activated 10845 * manually. In this case, we take the (harmless) action 10846 * of re-clearing the INACTIVE bit. 10847 */ 10848 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10849 } 10850 10851 dtrace_interrupt_enable(cookie); 10852 } 10853 10854 static int 10855 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10856 processorid_t cpu) 10857 { 10858 #if defined(sun) 10859 cpu_t *cp; 10860 #endif 10861 dtrace_buffer_t *buf; 10862 10863 #if defined(sun) 10864 ASSERT(MUTEX_HELD(&cpu_lock)); 10865 ASSERT(MUTEX_HELD(&dtrace_lock)); 10866 10867 if (size > dtrace_nonroot_maxsize && 10868 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10869 return (EFBIG); 10870 10871 cp = cpu_list; 10872 10873 do { 10874 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10875 continue; 10876 10877 buf = &bufs[cp->cpu_id]; 10878 10879 /* 10880 * If there is already a buffer allocated for this CPU, it 10881 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10882 */ 10883 if (buf->dtb_tomax != NULL) { 10884 ASSERT(buf->dtb_size == size); 10885 continue; 10886 } 10887 10888 ASSERT(buf->dtb_xamot == NULL); 10889 10890 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10891 goto err; 10892 10893 buf->dtb_size = size; 10894 buf->dtb_flags = flags; 10895 buf->dtb_offset = 0; 10896 buf->dtb_drops = 0; 10897 10898 if (flags & DTRACEBUF_NOSWITCH) 10899 continue; 10900 10901 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10902 goto err; 10903 } while ((cp = cp->cpu_next) != cpu_list); 10904 10905 return (0); 10906 10907 err: 10908 cp = cpu_list; 10909 10910 do { 10911 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10912 continue; 10913 10914 buf = &bufs[cp->cpu_id]; 10915 10916 if (buf->dtb_xamot != NULL) { 10917 ASSERT(buf->dtb_tomax != NULL); 10918 ASSERT(buf->dtb_size == size); 10919 kmem_free(buf->dtb_xamot, size); 10920 } 10921 10922 if (buf->dtb_tomax != NULL) { 10923 ASSERT(buf->dtb_size == size); 10924 kmem_free(buf->dtb_tomax, size); 10925 } 10926 10927 buf->dtb_tomax = NULL; 10928 buf->dtb_xamot = NULL; 10929 buf->dtb_size = 0; 10930 } while ((cp = cp->cpu_next) != cpu_list); 10931 10932 return (ENOMEM); 10933 #else 10934 int i; 10935 10936 #if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 10937 /* 10938 * FreeBSD isn't good at limiting the amount of memory we 10939 * ask to malloc, so let's place a limit here before trying 10940 * to do something that might well end in tears at bedtime. 10941 */ 10942 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10943 return (ENOMEM); 10944 #endif 10945 10946 ASSERT(MUTEX_HELD(&dtrace_lock)); 10947 CPU_FOREACH(i) { 10948 if (cpu != DTRACE_CPUALL && cpu != i) 10949 continue; 10950 10951 buf = &bufs[i]; 10952 10953 /* 10954 * If there is already a buffer allocated for this CPU, it 10955 * is only possible that this is a DR event. In this case, 10956 * the buffer size must match our specified size. 10957 */ 10958 if (buf->dtb_tomax != NULL) { 10959 ASSERT(buf->dtb_size == size); 10960 continue; 10961 } 10962 10963 ASSERT(buf->dtb_xamot == NULL); 10964 10965 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10966 goto err; 10967 10968 buf->dtb_size = size; 10969 buf->dtb_flags = flags; 10970 buf->dtb_offset = 0; 10971 buf->dtb_drops = 0; 10972 10973 if (flags & DTRACEBUF_NOSWITCH) 10974 continue; 10975 10976 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10977 goto err; 10978 } 10979 10980 return (0); 10981 10982 err: 10983 /* 10984 * Error allocating memory, so free the buffers that were 10985 * allocated before the failed allocation. 10986 */ 10987 CPU_FOREACH(i) { 10988 if (cpu != DTRACE_CPUALL && cpu != i) 10989 continue; 10990 10991 buf = &bufs[i]; 10992 10993 if (buf->dtb_xamot != NULL) { 10994 ASSERT(buf->dtb_tomax != NULL); 10995 ASSERT(buf->dtb_size == size); 10996 kmem_free(buf->dtb_xamot, size); 10997 } 10998 10999 if (buf->dtb_tomax != NULL) { 11000 ASSERT(buf->dtb_size == size); 11001 kmem_free(buf->dtb_tomax, size); 11002 } 11003 11004 buf->dtb_tomax = NULL; 11005 buf->dtb_xamot = NULL; 11006 buf->dtb_size = 0; 11007 11008 } 11009 11010 return (ENOMEM); 11011 #endif 11012 } 11013 11014 /* 11015 * Note: called from probe context. This function just increments the drop 11016 * count on a buffer. It has been made a function to allow for the 11017 * possibility of understanding the source of mysterious drop counts. (A
(A 11018 * problem for which one may be particularly disappointed that DTrace cannot 11019 * be used to understand DTrace.) 11020 */ 11021 static void 11022 dtrace_buffer_drop(dtrace_buffer_t *buf) 11023 { 11024 buf->dtb_drops++; 11025 } 11026 11027 /* 11028 * Note: called from probe context. This function is called to reserve space 11029 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 11030 * mstate. Returns the new offset in the buffer, or a negative value if an 11031 * error has occurred. 11032 */ 11033 static intptr_t 11034 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 11035 dtrace_state_t *state, dtrace_mstate_t *mstate) 11036 { 11037 intptr_t offs = buf->dtb_offset, soffs; 11038 intptr_t woffs; 11039 caddr_t tomax; 11040 size_t total; 11041 11042 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 11043 return (-1); 11044 11045 if ((tomax = buf->dtb_tomax) == NULL) { 11046 dtrace_buffer_drop(buf); 11047 return (-1); 11048 } 11049 11050 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 11051 while (offs & (align - 1)) { 11052 /* 11053 * Assert that our alignment is off by a number which 11054 * is itself sizeof (uint32_t) aligned. 11055 */ 11056 ASSERT(!((align - (offs & (align - 1))) & 11057 (sizeof (uint32_t) - 1))); 11058 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11059 offs += sizeof (uint32_t); 11060 } 11061 11062 if ((soffs = offs + needed) > buf->dtb_size) { 11063 dtrace_buffer_drop(buf); 11064 return (-1); 11065 } 11066 11067 if (mstate == NULL) 11068 return (offs); 11069 11070 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 11071 mstate->dtms_scratch_size = buf->dtb_size - soffs; 11072 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11073 11074 return (offs); 11075 } 11076 11077 if (buf->dtb_flags & DTRACEBUF_FILL) { 11078 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11079 (buf->dtb_flags & DTRACEBUF_FULL)) 11080 return (-1); 11081 goto out; 11082 } 11083 11084 total = needed + (offs & (align - 1)); 11085 11086 /* 11087 * For a ring buffer, life is quite a bit more complicated. Before 11088 * we can store any padding, we need to adjust our wrapping offset. 11089 * (If we've never before wrapped or we're not about to, no adjustment 11090 * is required.) 11091 */ 11092 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11093 offs + total > buf->dtb_size) { 11094 woffs = buf->dtb_xamot_offset; 11095 11096 if (offs + total > buf->dtb_size) { 11097 /* 11098 * We can't fit in the end of the buffer. First, a 11099 * sanity check that we can fit in the buffer at all. 11100 */ 11101 if (total > buf->dtb_size) { 11102 dtrace_buffer_drop(buf); 11103 return (-1); 11104 } 11105 11106 /* 11107 * We're going to be storing at the top of the buffer, 11108 * so now we need to deal with the wrapped offset. We 11109 * only reset our wrapped offset to 0 if it is 11110 * currently greater than the current offset. If it 11111 * is less than the current offset, it is because a 11112 * previous allocation induced a wrap -- but the 11113 * allocation didn't subsequently take the space due 11114 * to an error or false predicate evaluation. In this 11115 * case, we'll just leave the wrapped offset alone: if 11116 * the wrapped offset hasn't been advanced far enough 11117 * for this allocation, it will be adjusted in the 11118 * lower loop. 
11119 */ 11120 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11121 if (woffs >= offs) 11122 woffs = 0; 11123 } else { 11124 woffs = 0; 11125 } 11126 11127 /* 11128 * Now we know that we're going to be storing to the 11129 * top of the buffer and that there is room for us 11130 * there. We need to clear the buffer from the current 11131 * offset to the end (there may be old gunk there). 11132 */ 11133 while (offs < buf->dtb_size) 11134 tomax[offs++] = 0; 11135 11136 /* 11137 * We need to set our offset to zero. And because we 11138 * are wrapping, we need to set the bit indicating as 11139 * much. We can also adjust our needed space back 11140 * down to the space required by the ECB -- we know 11141 * that the top of the buffer is aligned. 11142 */ 11143 offs = 0; 11144 total = needed; 11145 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11146 } else { 11147 /* 11148 * There is room for us in the buffer, so we simply 11149 * need to check the wrapped offset. 11150 */ 11151 if (woffs < offs) { 11152 /* 11153 * The wrapped offset is less than the offset. 11154 * This can happen if we allocated buffer space 11155 * that induced a wrap, but then we didn't 11156 * subsequently take the space due to an error 11157 * or false predicate evaluation. This is 11158 * okay; we know that _this_ allocation isn't 11159 * going to induce a wrap. We still can't 11160 * reset the wrapped offset to be zero, 11161 * however: the space may have been trashed in 11162 * the previous failed probe attempt. But at 11163 * least the wrapped offset doesn't need to 11164 * be adjusted at all... 11165 */ 11166 goto out; 11167 } 11168 } 11169 11170 while (offs + total > woffs) { 11171 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11172 size_t size; 11173 11174 if (epid == DTRACE_EPIDNONE) { 11175 size = sizeof (uint32_t); 11176 } else { 11177 ASSERT3U(epid, <=, state->dts_necbs); 11178 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11179 11180 size = state->dts_ecbs[epid - 1]->dte_size; 11181 } 11182 11183 ASSERT(woffs + size <= buf->dtb_size); 11184 ASSERT(size != 0); 11185 11186 if (woffs + size == buf->dtb_size) { 11187 /* 11188 * We've reached the end of the buffer; we want 11189 * to set the wrapped offset to 0 and break 11190 * out. However, if the offs is 0, then we're 11191 * in a strange edge-condition: the amount of 11192 * space that we want to reserve plus the size 11193 * of the record that we're overwriting is 11194 * greater than the size of the buffer. This 11195 * is problematic because if we reserve the 11196 * space but subsequently don't consume it (due 11197 * to a failed predicate or error) the wrapped 11198 * offset will be 0 -- yet the EPID at offset 0 11199 * will not be committed. This situation is 11200 * relatively easy to deal with: if we're in 11201 * this case, the buffer is indistinguishable 11202 * from one that hasn't wrapped; we need only 11203 * finish the job by clearing the wrapped bit, 11204 * explicitly setting the offset to be 0, and 11205 * zero'ing out the old data in the buffer. 11206 */ 11207 if (offs == 0) { 11208 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11209 buf->dtb_offset = 0; 11210 woffs = total; 11211 11212 while (woffs < buf->dtb_size) 11213 tomax[woffs++] = 0; 11214 } 11215 11216 woffs = 0; 11217 break; 11218 } 11219 11220 woffs += size; 11221 } 11222 11223 /* 11224 * We have a wrapped offset. It may be that the wrapped offset 11225 * has become zero -- that's okay. 
11226 */ 11227 buf->dtb_xamot_offset = woffs; 11228 } 11229 11230 out: 11231 /* 11232 * Now we can plow the buffer with any necessary padding. 11233 */ 11234 while (offs & (align - 1)) { 11235 /* 11236 * Assert that our alignment is off by a number which 11237 * is itself sizeof (uint32_t) aligned. 11238 */ 11239 ASSERT(!((align - (offs & (align - 1))) & 11240 (sizeof (uint32_t) - 1))); 11241 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11242 offs += sizeof (uint32_t); 11243 } 11244 11245 if (buf->dtb_flags & DTRACEBUF_FILL) { 11246 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11247 buf->dtb_flags |= DTRACEBUF_FULL; 11248 return (-1); 11249 } 11250 } 11251 11252 if (mstate == NULL) 11253 return (offs); 11254 11255 /* 11256 * For ring buffers and fill buffers, the scratch space is always 11257 * the inactive buffer. 11258 */ 11259 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11260 mstate->dtms_scratch_size = buf->dtb_size; 11261 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11262 11263 return (offs); 11264 } 11265 11266 static void 11267 dtrace_buffer_polish(dtrace_buffer_t *buf) 11268 { 11269 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11270 ASSERT(MUTEX_HELD(&dtrace_lock)); 11271 11272 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11273 return; 11274 11275 /* 11276 * We need to polish the ring buffer. There are three cases: 11277 * 11278 * - The first (and presumably most common) is that there is no gap 11279 * between the buffer offset and the wrapped offset. In this case, 11280 * there is nothing in the buffer that isn't valid data; we can 11281 * mark the buffer as polished and return. 11282 * 11283 * - The second (less common than the first but still more common 11284 * than the third) is that there is a gap between the buffer offset 11285 * and the wrapped offset, and the wrapped offset is larger than the 11286 * buffer offset. This can happen because of an alignment issue, or 11287 * can happen because of a call to dtrace_buffer_reserve() that 11288 * didn't subsequently consume the buffer space. In this case, 11289 * we need to zero the data from the buffer offset to the wrapped 11290 * offset. 11291 * 11292 * - The third (and least common) is that there is a gap between the 11293 * buffer offset and the wrapped offset, but the wrapped offset is 11294 * _less_ than the buffer offset. This can only happen because a 11295 * call to dtrace_buffer_reserve() induced a wrap, but the space 11296 * was not subsequently consumed. In this case, we need to zero the 11297 * space from the offset to the end of the buffer _and_ from the 11298 * top of the buffer to the wrapped offset. 11299 */ 11300 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11301 bzero(buf->dtb_tomax + buf->dtb_offset, 11302 buf->dtb_xamot_offset - buf->dtb_offset); 11303 } 11304 11305 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11306 bzero(buf->dtb_tomax + buf->dtb_offset, 11307 buf->dtb_size - buf->dtb_offset); 11308 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11309 } 11310 } 11311 11312 /* 11313 * This routine determines if data generated at the specified time has likely 11314 * been entirely consumed at user-level. This routine is called to determine 11315 * if an ECB on a defunct probe (but for an active enabling) can be safely 11316 * disabled and destroyed. 
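* (Ring buffers are never deemed consumed, as they are consumed in
* place. For switching buffers, the test below requires in effect
* that the buffer has been switched twice since the specified time:
* the first switch handed the data in question to user-level, and
* the second implies that the consumer has since moved on.)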
11317 */ 11318 static int 11319 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11320 { 11321 int i; 11322 11323 for (i = 0; i < NCPU; i++) { 11324 dtrace_buffer_t *buf = &bufs[i]; 11325 11326 if (buf->dtb_size == 0) 11327 continue; 11328 11329 if (buf->dtb_flags & DTRACEBUF_RING) 11330 return (0); 11331 11332 if (!buf->dtb_switched && buf->dtb_offset != 0) 11333 return (0); 11334 11335 if (buf->dtb_switched - buf->dtb_interval < when) 11336 return (0); 11337 } 11338 11339 return (1); 11340 } 11341 11342 static void 11343 dtrace_buffer_free(dtrace_buffer_t *bufs) 11344 { 11345 int i; 11346 11347 for (i = 0; i < NCPU; i++) { 11348 dtrace_buffer_t *buf = &bufs[i]; 11349 11350 if (buf->dtb_tomax == NULL) { 11351 ASSERT(buf->dtb_xamot == NULL); 11352 ASSERT(buf->dtb_size == 0); 11353 continue; 11354 } 11355 11356 if (buf->dtb_xamot != NULL) { 11357 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11358 kmem_free(buf->dtb_xamot, buf->dtb_size); 11359 } 11360 11361 kmem_free(buf->dtb_tomax, buf->dtb_size); 11362 buf->dtb_size = 0; 11363 buf->dtb_tomax = NULL; 11364 buf->dtb_xamot = NULL; 11365 } 11366 } 11367 11368 /* 11369 * DTrace Enabling Functions 11370 */ 11371 static dtrace_enabling_t * 11372 dtrace_enabling_create(dtrace_vstate_t *vstate) 11373 { 11374 dtrace_enabling_t *enab; 11375 11376 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11377 enab->dten_vstate = vstate; 11378 11379 return (enab); 11380 } 11381 11382 static void 11383 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11384 { 11385 dtrace_ecbdesc_t **ndesc; 11386 size_t osize, nsize; 11387 11388 /* 11389 * We can't add to enablings after we've enabled them, or after we've 11390 * retained them. 11391 */ 11392 ASSERT(enab->dten_probegen == 0); 11393 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11394 11395 if (enab->dten_ndesc < enab->dten_maxdesc) { 11396 enab->dten_desc[enab->dten_ndesc++] = ecb; 11397 return; 11398 } 11399 11400 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11401 11402 if (enab->dten_maxdesc == 0) { 11403 enab->dten_maxdesc = 1; 11404 } else { 11405 enab->dten_maxdesc <<= 1; 11406 } 11407 11408 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11409 11410 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11411 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11412 bcopy(enab->dten_desc, ndesc, osize); 11413 if (enab->dten_desc != NULL) 11414 kmem_free(enab->dten_desc, osize); 11415 11416 enab->dten_desc = ndesc; 11417 enab->dten_desc[enab->dten_ndesc++] = ecb; 11418 } 11419 11420 static void 11421 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11422 dtrace_probedesc_t *pd) 11423 { 11424 dtrace_ecbdesc_t *new; 11425 dtrace_predicate_t *pred; 11426 dtrace_actdesc_t *act; 11427 11428 /* 11429 * We're going to create a new ECB description that matches the 11430 * specified ECB in every way, but has the specified probe description. 
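* The new description shares -- and takes holds on -- the original's
* predicate and actions rather than copying them; only the probe
* description itself differs. dtrace_enabling_replicate(), below,
* relies on this to clone retained enablings onto a newly-specified
* probe.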
11431 */ 11432 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11433 11434 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11435 dtrace_predicate_hold(pred); 11436 11437 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11438 dtrace_actdesc_hold(act); 11439 11440 new->dted_action = ecb->dted_action; 11441 new->dted_pred = ecb->dted_pred; 11442 new->dted_probe = *pd; 11443 new->dted_uarg = ecb->dted_uarg; 11444 11445 dtrace_enabling_add(enab, new); 11446 } 11447 11448 static void 11449 dtrace_enabling_dump(dtrace_enabling_t *enab) 11450 { 11451 int i; 11452 11453 for (i = 0; i < enab->dten_ndesc; i++) { 11454 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11455 11456 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11457 desc->dtpd_provider, desc->dtpd_mod, 11458 desc->dtpd_func, desc->dtpd_name); 11459 } 11460 } 11461 11462 static void 11463 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11464 { 11465 int i; 11466 dtrace_ecbdesc_t *ep; 11467 dtrace_vstate_t *vstate = enab->dten_vstate; 11468 11469 ASSERT(MUTEX_HELD(&dtrace_lock)); 11470 11471 for (i = 0; i < enab->dten_ndesc; i++) { 11472 dtrace_actdesc_t *act, *next; 11473 dtrace_predicate_t *pred; 11474 11475 ep = enab->dten_desc[i]; 11476 11477 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11478 dtrace_predicate_release(pred, vstate); 11479 11480 for (act = ep->dted_action; act != NULL; act = next) { 11481 next = act->dtad_next; 11482 dtrace_actdesc_release(act, vstate); 11483 } 11484 11485 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11486 } 11487 11488 if (enab->dten_desc != NULL) 11489 kmem_free(enab->dten_desc, 11490 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11491 11492 /* 11493 * If this was a retained enabling, decrement the dts_nretained count 11494 * and take it off of the dtrace_retained list. 11495 */ 11496 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11497 dtrace_retained == enab) { 11498 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11499 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11500 enab->dten_vstate->dtvs_state->dts_nretained--; 11501 } 11502 11503 if (enab->dten_prev == NULL) { 11504 if (dtrace_retained == enab) { 11505 dtrace_retained = enab->dten_next; 11506 11507 if (dtrace_retained != NULL) 11508 dtrace_retained->dten_prev = NULL; 11509 } 11510 } else { 11511 ASSERT(enab != dtrace_retained); 11512 ASSERT(dtrace_retained != NULL); 11513 enab->dten_prev->dten_next = enab->dten_next; 11514 } 11515 11516 if (enab->dten_next != NULL) { 11517 ASSERT(dtrace_retained != NULL); 11518 enab->dten_next->dten_prev = enab->dten_prev; 11519 } 11520 11521 kmem_free(enab, sizeof (dtrace_enabling_t)); 11522 } 11523 11524 static int 11525 dtrace_enabling_retain(dtrace_enabling_t *enab) 11526 { 11527 dtrace_state_t *state; 11528 11529 ASSERT(MUTEX_HELD(&dtrace_lock)); 11530 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11531 ASSERT(enab->dten_vstate != NULL); 11532 11533 state = enab->dten_vstate->dtvs_state; 11534 ASSERT(state != NULL); 11535 11536 /* 11537 * We only allow each state to retain dtrace_retain_max enablings. 
11538 */ 11539 if (state->dts_nretained >= dtrace_retain_max) 11540 return (ENOSPC); 11541 11542 state->dts_nretained++; 11543 11544 if (dtrace_retained == NULL) { 11545 dtrace_retained = enab; 11546 return (0); 11547 } 11548 11549 enab->dten_next = dtrace_retained; 11550 dtrace_retained->dten_prev = enab; 11551 dtrace_retained = enab; 11552 11553 return (0); 11554 } 11555 11556 static int 11557 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11558 dtrace_probedesc_t *create) 11559 { 11560 dtrace_enabling_t *new, *enab; 11561 int found = 0, err = ENOENT; 11562 11563 ASSERT(MUTEX_HELD(&dtrace_lock)); 11564 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11565 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11566 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11567 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11568 11569 new = dtrace_enabling_create(&state->dts_vstate); 11570 11571 /* 11572 * Iterate over all retained enablings, looking for enablings that 11573 * match the specified state. 11574 */ 11575 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11576 int i; 11577 11578 /* 11579 * dtvs_state can only be NULL for helper enablings -- and 11580 * helper enablings can't be retained. 11581 */ 11582 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11583 11584 if (enab->dten_vstate->dtvs_state != state) 11585 continue; 11586 11587 /* 11588 * Now iterate over each probe description; we're looking for 11589 * an exact match to the specified probe description. 11590 */ 11591 for (i = 0; i < enab->dten_ndesc; i++) { 11592 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11593 dtrace_probedesc_t *pd = &ep->dted_probe; 11594 11595 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11596 continue; 11597 11598 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11599 continue; 11600 11601 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11602 continue; 11603 11604 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11605 continue; 11606 11607 /* 11608 * We have a winning probe! Add it to our growing 11609 * enabling. 11610 */ 11611 found = 1; 11612 dtrace_enabling_addlike(new, ep, create); 11613 } 11614 } 11615 11616 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11617 dtrace_enabling_destroy(new); 11618 return (err); 11619 } 11620 11621 return (0); 11622 } 11623 11624 static void 11625 dtrace_enabling_retract(dtrace_state_t *state) 11626 { 11627 dtrace_enabling_t *enab, *next; 11628 11629 ASSERT(MUTEX_HELD(&dtrace_lock)); 11630 11631 /* 11632 * Iterate over all retained enablings, destroy the enablings retained 11633 * for the specified state. 11634 */ 11635 for (enab = dtrace_retained; enab != NULL; enab = next) { 11636 next = enab->dten_next; 11637 11638 /* 11639 * dtvs_state can only be NULL for helper enablings -- and 11640 * helper enablings can't be retained. 
11641 */ 11642 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11643 11644 if (enab->dten_vstate->dtvs_state == state) { 11645 ASSERT(state->dts_nretained > 0); 11646 dtrace_enabling_destroy(enab); 11647 } 11648 } 11649 11650 ASSERT(state->dts_nretained == 0); 11651 } 11652 11653 static int 11654 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11655 { 11656 int i = 0; 11657 int matched = 0; 11658 11659 ASSERT(MUTEX_HELD(&cpu_lock)); 11660 ASSERT(MUTEX_HELD(&dtrace_lock)); 11661 11662 for (i = 0; i < enab->dten_ndesc; i++) { 11663 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11664 11665 enab->dten_current = ep; 11666 enab->dten_error = 0; 11667 11668 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11669 11670 if (enab->dten_error != 0) { 11671 /* 11672 * If we get an error half-way through enabling the 11673 * probes, we kick out -- perhaps with some number of 11674 * them enabled. Leaving enabled probes enabled may 11675 * be slightly confusing for user-level, but we expect 11676 * that no one will attempt to actually drive on in 11677 * the face of such errors. If this is an anonymous 11678 * enabling (indicated with a NULL nmatched pointer), 11679 * we cmn_err() a message. We aren't expecting to 11680 * get such an error -- to the extent that such an error 11681 * can exist at all, it would be a result of corrupted 11682 * DOF in the driver properties. 11683 */ 11684 if (nmatched == NULL) { 11685 cmn_err(CE_WARN, "dtrace_enabling_match() " 11686 "error on %p: %d", (void *)ep, 11687 enab->dten_error); 11688 } 11689 11690 return (enab->dten_error); 11691 } 11692 } 11693 11694 enab->dten_probegen = dtrace_probegen; 11695 if (nmatched != NULL) 11696 *nmatched = matched; 11697 11698 return (0); 11699 } 11700 11701 static void 11702 dtrace_enabling_matchall(void) 11703 { 11704 dtrace_enabling_t *enab; 11705 11706 mutex_enter(&cpu_lock); 11707 mutex_enter(&dtrace_lock); 11708 11709 /* 11710 * Iterate over all retained enablings to see if any probes match 11711 * against them. We only perform this operation on enablings for which 11712 * we have sufficient permissions by virtue of being in the global zone 11713 * or in the same zone as the DTrace client. Because we can be called 11714 * after dtrace_detach() has been called, we cannot assert that there 11715 * are retained enablings. We can safely load from dtrace_retained, 11716 * however: the taskq_destroy() at the end of dtrace_detach() will 11717 * block pending our completion. 11718 */ 11719 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11720 #if defined(sun) 11721 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11722 11723 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11724 #endif 11725 (void) dtrace_enabling_match(enab, NULL); 11726 } 11727 11728 mutex_exit(&dtrace_lock); 11729 mutex_exit(&cpu_lock); 11730 } 11731 11732 /* 11733 * If an enabling is to be enabled without having matched probes (that is, if 11734 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11735 * enabling must be _primed_ by creating an ECB for every ECB description. 11736 * This must be done to assure that we know the number of speculations, the 11737 * number of aggregations, the minimum buffer size needed, etc. before we 11738 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11739 * enabling any probes, we create ECBs for every ECB description, but with a 11740 * NULL probe -- which is exactly what this function does.
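* (A NULL probe is benign here: dtrace_ecb_create() skips its
* probe-specific work -- the implicit predicate setup -- when the
* probe is NULL, while the resulting ECB still feeds the
* speculation, aggregation and buffer-size accounting that
* dtrace_state_go() needs.)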
11741 */ 11742 static void 11743 dtrace_enabling_prime(dtrace_state_t *state) 11744 { 11745 dtrace_enabling_t *enab; 11746 int i; 11747 11748 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11749 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11750 11751 if (enab->dten_vstate->dtvs_state != state) 11752 continue; 11753 11754 /* 11755 * We don't want to prime an enabling more than once, lest 11756 * we allow a malicious user to induce resource exhaustion. 11757 * (The ECBs that result from priming an enabling aren't 11758 * leaked -- but they also aren't deallocated until the 11759 * consumer state is destroyed.) 11760 */ 11761 if (enab->dten_primed) 11762 continue; 11763 11764 for (i = 0; i < enab->dten_ndesc; i++) { 11765 enab->dten_current = enab->dten_desc[i]; 11766 (void) dtrace_probe_enable(NULL, enab); 11767 } 11768 11769 enab->dten_primed = 1; 11770 } 11771 } 11772 11773 /* 11774 * Called to indicate that probes should be provided due to retained 11775 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11776 * must take an initial lap through the enabling calling the dtps_provide() 11777 * entry point explicitly to allow for autocreated probes. 11778 */ 11779 static void 11780 dtrace_enabling_provide(dtrace_provider_t *prv) 11781 { 11782 int i, all = 0; 11783 dtrace_probedesc_t desc; 11784 11785 ASSERT(MUTEX_HELD(&dtrace_lock)); 11786 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11787 11788 if (prv == NULL) { 11789 all = 1; 11790 prv = dtrace_provider; 11791 } 11792 11793 do { 11794 dtrace_enabling_t *enab = dtrace_retained; 11795 void *parg = prv->dtpv_arg; 11796 11797 for (; enab != NULL; enab = enab->dten_next) { 11798 for (i = 0; i < enab->dten_ndesc; i++) { 11799 desc = enab->dten_desc[i]->dted_probe; 11800 mutex_exit(&dtrace_lock); 11801 prv->dtpv_pops.dtps_provide(parg, &desc); 11802 mutex_enter(&dtrace_lock); 11803 } 11804 } 11805 } while (all && (prv = prv->dtpv_next) != NULL); 11806 11807 mutex_exit(&dtrace_lock); 11808 dtrace_probe_provide(NULL, all ? NULL : prv); 11809 mutex_enter(&dtrace_lock); 11810 } 11811 11812 /* 11813 * Called to reap ECBs that are attached to probes from defunct providers. 11814 */ 11815 static void 11816 dtrace_enabling_reap(void) 11817 { 11818 dtrace_provider_t *prov; 11819 dtrace_probe_t *probe; 11820 dtrace_ecb_t *ecb; 11821 hrtime_t when; 11822 int i; 11823 11824 mutex_enter(&cpu_lock); 11825 mutex_enter(&dtrace_lock); 11826 11827 for (i = 0; i < dtrace_nprobes; i++) { 11828 if ((probe = dtrace_probes[i]) == NULL) 11829 continue; 11830 11831 if (probe->dtpr_ecb == NULL) 11832 continue; 11833 11834 prov = probe->dtpr_provider; 11835 11836 if ((when = prov->dtpv_defunct) == 0) 11837 continue; 11838 11839 /* 11840 * We have ECBs on a defunct provider: we want to reap these 11841 * ECBs to allow the provider to unregister. The destruction 11842 * of these ECBs must be done carefully: if we destroy the ECB 11843 * and the consumer later wishes to consume an EPID that 11844 * corresponds to the destroyed ECB (and if the EPID metadata 11845 * has not been previously consumed), the consumer will abort 11846 * processing on the unknown EPID. To reduce (but not, sadly, 11847 * eliminate) the possibility of this, we will only destroy an 11848 * ECB for a defunct provider if, for the state that 11849 * corresponds to the ECB: 11850 * 11851 * (a) There is no speculative tracing (which can effectively 11852 * cache an EPID for an arbitrary amount of time). 
11853 * 11854 * (b) The principal buffers have been switched twice since the 11855 * provider became defunct. 11856 * 11857 * (c) The aggregation buffers are of zero size or have been 11858 * switched twice since the provider became defunct. 11859 * 11860 * We use dts_speculates to determine (a) and call a function 11861 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11862 * that as soon as we've been unable to destroy one of the ECBs 11863 * associated with the probe, we quit trying -- reaping is only 11864 * fruitful in as much as we can destroy all ECBs associated 11865 * with the defunct provider's probes. 11866 */ 11867 while ((ecb = probe->dtpr_ecb) != NULL) { 11868 dtrace_state_t *state = ecb->dte_state; 11869 dtrace_buffer_t *buf = state->dts_buffer; 11870 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11871 11872 if (state->dts_speculates) 11873 break; 11874 11875 if (!dtrace_buffer_consumed(buf, when)) 11876 break; 11877 11878 if (!dtrace_buffer_consumed(aggbuf, when)) 11879 break; 11880 11881 dtrace_ecb_disable(ecb); 11882 ASSERT(probe->dtpr_ecb != ecb); 11883 dtrace_ecb_destroy(ecb); 11884 } 11885 } 11886 11887 mutex_exit(&dtrace_lock); 11888 mutex_exit(&cpu_lock); 11889 } 11890 11891 /* 11892 * DTrace DOF Functions 11893 */ 11894 /*ARGSUSED*/ 11895 static void 11896 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11897 { 11898 if (dtrace_err_verbose) 11899 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11900 11901 #ifdef DTRACE_ERRDEBUG 11902 dtrace_errdebug(str); 11903 #endif 11904 } 11905 11906 /* 11907 * Create DOF out of a currently enabled state. Right now, we only create 11908 * DOF containing the run-time options -- but this could be expanded to create 11909 * complete DOF representing the enabled state. 11910 */ 11911 static dof_hdr_t * 11912 dtrace_dof_create(dtrace_state_t *state) 11913 { 11914 dof_hdr_t *dof; 11915 dof_sec_t *sec; 11916 dof_optdesc_t *opt; 11917 int i, len = sizeof (dof_hdr_t) + 11918 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11919 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11920 11921 ASSERT(MUTEX_HELD(&dtrace_lock)); 11922 11923 dof = kmem_zalloc(len, KM_SLEEP); 11924 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11925 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11926 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11927 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11928 11929 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11930 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11931 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11932 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11933 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11934 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11935 11936 dof->dofh_flags = 0; 11937 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11938 dof->dofh_secsize = sizeof (dof_sec_t); 11939 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11940 dof->dofh_secoff = sizeof (dof_hdr_t); 11941 dof->dofh_loadsz = len; 11942 dof->dofh_filesz = len; 11943 dof->dofh_pad = 0; 11944 11945 /* 11946 * Fill in the option section header... 
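* The generated DOF is thus laid out as a dof_hdr_t, followed by a
* single dof_sec_t of type DOF_SECT_OPTDESC (padded out to 64-bit
* alignment), followed by DTRACEOPT_MAX dof_optdesc_t entries --
* one per run-time option, each carrying that option's current
* value.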
11947 */ 11948 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11949 sec->dofs_type = DOF_SECT_OPTDESC; 11950 sec->dofs_align = sizeof (uint64_t); 11951 sec->dofs_flags = DOF_SECF_LOAD; 11952 sec->dofs_entsize = sizeof (dof_optdesc_t); 11953 11954 opt = (dof_optdesc_t *)((uintptr_t)sec + 11955 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11956 11957 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11958 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11959 11960 for (i = 0; i < DTRACEOPT_MAX; i++) { 11961 opt[i].dofo_option = i; 11962 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11963 opt[i].dofo_value = state->dts_options[i]; 11964 } 11965 11966 return (dof); 11967 } 11968 11969 static dof_hdr_t * 11970 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11971 { 11972 dof_hdr_t hdr, *dof; 11973 11974 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11975 11976 /* 11977 * First, we're going to copyin() the sizeof (dof_hdr_t). 11978 */ 11979 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11980 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11981 *errp = EFAULT; 11982 return (NULL); 11983 } 11984 11985 /* 11986 * Now we'll allocate the entire DOF and copy it in -- provided 11987 * that the length isn't outrageous. 11988 */ 11989 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11990 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11991 *errp = E2BIG; 11992 return (NULL); 11993 } 11994 11995 if (hdr.dofh_loadsz < sizeof (hdr)) { 11996 dtrace_dof_error(&hdr, "invalid load size"); 11997 *errp = EINVAL; 11998 return (NULL); 11999 } 12000 12001 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 12002 12003 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 12004 kmem_free(dof, hdr.dofh_loadsz); 12005 *errp = EFAULT; 12006 return (NULL); 12007 } 12008 12009 return (dof); 12010 } 12011 12012 #if !defined(sun) 12013 static __inline uchar_t 12014 dtrace_dof_char(char c) { 12015 switch (c) { 12016 case '0': 12017 case '1': 12018 case '2': 12019 case '3': 12020 case '4': 12021 case '5': 12022 case '6': 12023 case '7': 12024 case '8': 12025 case '9': 12026 return (c - '0'); 12027 case 'A': 12028 case 'B': 12029 case 'C': 12030 case 'D': 12031 case 'E': 12032 case 'F': 12033 return (c - 'A' + 10); 12034 case 'a': 12035 case 'b': 12036 case 'c': 12037 case 'd': 12038 case 'e': 12039 case 'f': 12040 return (c - 'a' + 10); 12041 } 12042 /* Should not reach here. */ 12043 return (0); 12044 } 12045 #endif 12046 12047 static dof_hdr_t * 12048 dtrace_dof_property(const char *name) 12049 { 12050 uchar_t *buf; 12051 uint64_t loadsz; 12052 unsigned int len, i; 12053 dof_hdr_t *dof; 12054 12055 #if defined(sun) 12056 /* 12057 * Unfortunately, arrays of values in .conf files are always (and 12058 * only) interpreted to be integer arrays. We must read our DOF 12059 * as an integer array, and then squeeze it into a byte array.
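* (That is, each 32-bit element of the looked-up array carries one
* byte of DOF in its low bits; the loop below narrows the elements
* in place, after which buf can be treated as a contiguous byte
* stream.)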
12060 */ 12061 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 12062 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 12063 return (NULL); 12064 12065 for (i = 0; i < len; i++) 12066 buf[i] = (uchar_t)(((int *)buf)[i]); 12067 12068 if (len < sizeof (dof_hdr_t)) { 12069 ddi_prop_free(buf); 12070 dtrace_dof_error(NULL, "truncated header"); 12071 return (NULL); 12072 } 12073 12074 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 12075 ddi_prop_free(buf); 12076 dtrace_dof_error(NULL, "truncated DOF"); 12077 return (NULL); 12078 } 12079 12080 if (loadsz >= dtrace_dof_maxsize) { 12081 ddi_prop_free(buf); 12082 dtrace_dof_error(NULL, "oversized DOF"); 12083 return (NULL); 12084 } 12085 12086 dof = kmem_alloc(loadsz, KM_SLEEP); 12087 bcopy(buf, dof, loadsz); 12088 ddi_prop_free(buf); 12089 #else 12090 char *p; 12091 char *p_env; 12092 12093 if ((p_env = getenv(name)) == NULL) 12094 return (NULL); 12095 12096 len = strlen(p_env) / 2; 12097 12098 buf = kmem_alloc(len, KM_SLEEP); 12099 12100 dof = (dof_hdr_t *) buf; 12101 12102 p = p_env; 12103 12104 for (i = 0; i < len; i++) { 12105 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12106 dtrace_dof_char(p[1]); 12107 p += 2; 12108 } 12109 12110 freeenv(p_env); 12111 12112 if (len < sizeof (dof_hdr_t)) { 12113 kmem_free(buf, 0); 12114 dtrace_dof_error(NULL, "truncated header"); 12115 return (NULL); 12116 } 12117 12118 if (len < (loadsz = dof->dofh_loadsz)) { 12119 kmem_free(buf, 0); 12120 dtrace_dof_error(NULL, "truncated DOF"); 12121 return (NULL); 12122 } 12123 12124 if (loadsz >= dtrace_dof_maxsize) { 12125 kmem_free(buf, 0); 12126 dtrace_dof_error(NULL, "oversized DOF"); 12127 return (NULL); 12128 } 12129 #endif 12130 12131 return (dof); 12132 } 12133 12134 static void 12135 dtrace_dof_destroy(dof_hdr_t *dof) 12136 { 12137 kmem_free(dof, dof->dofh_loadsz); 12138 } 12139 12140 /* 12141 * Return the dof_sec_t pointer corresponding to a given section index. If the 12142 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12143 * a type other than DOF_SECT_NONE is specified, the header is checked against 12144 * this type and NULL is returned if the types do not match. 
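* (Callers either name the type they require -- e.g. the string
* table lookup dtrace_dof_sect(dof, DOF_SECT_STRTAB,
* probe->dofp_strtab) in dtrace_dof_probedesc(), below -- or pass
* DOF_SECT_NONE to accept any loadable section, as the DIFO
* sub-section walk does.)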
12145 */ 12146 static dof_sec_t * 12147 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12148 { 12149 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12150 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12151 12152 if (i >= dof->dofh_secnum) { 12153 dtrace_dof_error(dof, "referenced section index is invalid"); 12154 return (NULL); 12155 } 12156 12157 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12158 dtrace_dof_error(dof, "referenced section is not loadable"); 12159 return (NULL); 12160 } 12161 12162 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12163 dtrace_dof_error(dof, "referenced section is the wrong type"); 12164 return (NULL); 12165 } 12166 12167 return (sec); 12168 } 12169 12170 static dtrace_probedesc_t * 12171 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12172 { 12173 dof_probedesc_t *probe; 12174 dof_sec_t *strtab; 12175 uintptr_t daddr = (uintptr_t)dof; 12176 uintptr_t str; 12177 size_t size; 12178 12179 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12180 dtrace_dof_error(dof, "invalid probe section"); 12181 return (NULL); 12182 } 12183 12184 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12185 dtrace_dof_error(dof, "bad alignment in probe description"); 12186 return (NULL); 12187 } 12188 12189 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12190 dtrace_dof_error(dof, "truncated probe description"); 12191 return (NULL); 12192 } 12193 12194 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12195 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12196 12197 if (strtab == NULL) 12198 return (NULL); 12199 12200 str = daddr + strtab->dofs_offset; 12201 size = strtab->dofs_size; 12202 12203 if (probe->dofp_provider >= strtab->dofs_size) { 12204 dtrace_dof_error(dof, "corrupt probe provider"); 12205 return (NULL); 12206 } 12207 12208 (void) strncpy(desc->dtpd_provider, 12209 (char *)(str + probe->dofp_provider), 12210 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12211 12212 if (probe->dofp_mod >= strtab->dofs_size) { 12213 dtrace_dof_error(dof, "corrupt probe module"); 12214 return (NULL); 12215 } 12216 12217 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12218 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12219 12220 if (probe->dofp_func >= strtab->dofs_size) { 12221 dtrace_dof_error(dof, "corrupt probe function"); 12222 return (NULL); 12223 } 12224 12225 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12226 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12227 12228 if (probe->dofp_name >= strtab->dofs_size) { 12229 dtrace_dof_error(dof, "corrupt probe name"); 12230 return (NULL); 12231 } 12232 12233 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12234 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12235 12236 return (desc); 12237 } 12238 12239 static dtrace_difo_t * 12240 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12241 cred_t *cr) 12242 { 12243 dtrace_difo_t *dp; 12244 size_t ttl = 0; 12245 dof_difohdr_t *dofd; 12246 uintptr_t daddr = (uintptr_t)dof; 12247 size_t max = dtrace_difo_maxsize; 12248 int i, l, n; 12249 12250 static const struct { 12251 int section; 12252 int bufoffs; 12253 int lenoffs; 12254 int entsize; 12255 int align; 12256 const char *msg; 12257 } difo[] = { 12258 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12259 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12260 sizeof (dif_instr_t), "multiple DIF sections" }, 
12261 12262 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12263 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12264 sizeof (uint64_t), "multiple integer tables" }, 12265 12266 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12267 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12268 sizeof (char), "multiple string tables" }, 12269 12270 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12271 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12272 sizeof (uint_t), "multiple variable tables" }, 12273 12274 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12275 }; 12276 12277 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12278 dtrace_dof_error(dof, "invalid DIFO header section"); 12279 return (NULL); 12280 } 12281 12282 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12283 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12284 return (NULL); 12285 } 12286 12287 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12288 sec->dofs_size % sizeof (dof_secidx_t)) { 12289 dtrace_dof_error(dof, "bad size in DIFO header"); 12290 return (NULL); 12291 } 12292 12293 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12294 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12295 12296 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12297 dp->dtdo_rtype = dofd->dofd_rtype; 12298 12299 for (l = 0; l < n; l++) { 12300 dof_sec_t *subsec; 12301 void **bufp; 12302 uint32_t *lenp; 12303 12304 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12305 dofd->dofd_links[l])) == NULL) 12306 goto err; /* invalid section link */ 12307 12308 if (ttl + subsec->dofs_size > max) { 12309 dtrace_dof_error(dof, "exceeds maximum size"); 12310 goto err; 12311 } 12312 12313 ttl += subsec->dofs_size; 12314 12315 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12316 if (subsec->dofs_type != difo[i].section) 12317 continue; 12318 12319 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12320 dtrace_dof_error(dof, "section not loaded"); 12321 goto err; 12322 } 12323 12324 if (subsec->dofs_align != difo[i].align) { 12325 dtrace_dof_error(dof, "bad alignment"); 12326 goto err; 12327 } 12328 12329 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12330 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12331 12332 if (*bufp != NULL) { 12333 dtrace_dof_error(dof, difo[i].msg); 12334 goto err; 12335 } 12336 12337 if (difo[i].entsize != subsec->dofs_entsize) { 12338 dtrace_dof_error(dof, "entry size mismatch"); 12339 goto err; 12340 } 12341 12342 if (subsec->dofs_entsize != 0 && 12343 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12344 dtrace_dof_error(dof, "corrupt entry size"); 12345 goto err; 12346 } 12347 12348 *lenp = subsec->dofs_size; 12349 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12350 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12351 *bufp, subsec->dofs_size); 12352 12353 if (subsec->dofs_entsize != 0) 12354 *lenp /= subsec->dofs_entsize; 12355 12356 break; 12357 } 12358 12359 /* 12360 * If we encounter a loadable DIFO sub-section that is not 12361 * known to us, assume this is a broken program and fail. 12362 */ 12363 if (difo[i].section == DOF_SECT_NONE && 12364 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12365 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12366 goto err; 12367 } 12368 } 12369 12370 if (dp->dtdo_buf == NULL) { 12371 /* 12372 * We can't have a DIF object without DIF text. 
		 */
		dtrace_dof_error(dof, "missing DIF text");
		goto err;
	}

	/*
	 * Before we validate the DIF object, run through the variable table
	 * looking for the strings -- if any of their sizes are zero, we'll
	 * set them to the system-wide default string size.  Note that this
	 * should _not_ happen if the "strsize" option has been set -- in
	 * this case, the compiler should have set the size to reflect the
	 * setting of the option.
	 */
	for (i = 0; i < dp->dtdo_varlen; i++) {
		dtrace_difv_t *v = &dp->dtdo_vartab[i];
		dtrace_diftype_t *t = &v->dtdv_type;

		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
			continue;

		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
			t->dtdt_size = dtrace_strsize_default;
	}

	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
		goto err;

	dtrace_difo_init(dp, vstate);
	return (dp);

err:
	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));

	kmem_free(dp, sizeof (dtrace_difo_t));
	return (NULL);
}

static dtrace_predicate_t *
dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_difo_t *dp;

	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
		return (NULL);

	return (dtrace_predicate_create(dp));
}

static dtrace_actdesc_t *
dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
    cred_t *cr)
{
	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
	dof_actdesc_t *desc;
	dof_sec_t *difosec;
	size_t offs;
	uintptr_t daddr = (uintptr_t)dof;
	uint64_t arg;
	dtrace_actkind_t kind;

	if (sec->dofs_type != DOF_SECT_ACTDESC) {
		dtrace_dof_error(dof, "invalid action section");
		return (NULL);
	}

	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
		dtrace_dof_error(dof, "truncated action description");
		return (NULL);
	}

	if (sec->dofs_align != sizeof (uint64_t)) {
		dtrace_dof_error(dof, "bad alignment in action description");
		return (NULL);
	}

	if (sec->dofs_size < sec->dofs_entsize) {
		dtrace_dof_error(dof, "section entry size exceeds total size");
		return (NULL);
	}

	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
		dtrace_dof_error(dof, "bad entry size in action description");
		return (NULL);
	}

	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
		return (NULL);
	}

	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
		desc = (dof_actdesc_t *)(daddr +
		    (uintptr_t)sec->dofs_offset + offs);
		kind = (dtrace_actkind_t)desc->dofa_kind;

		if ((DTRACEACT_ISPRINTFLIKE(kind) &&
		    (kind != DTRACEACT_PRINTA ||
		    desc->dofa_strtab != DOF_SECIDX_NONE)) ||
		    (kind == DTRACEACT_DIFEXPR &&
		    desc->dofa_strtab != DOF_SECIDX_NONE)) {
			dof_sec_t *strtab;
			char *str, *fmt;
			uint64_t i;

			/*
			 * The argument to these actions is an index into the
12483 * DOF string table. For printf()-like actions, this 12484 * is the format string. For print(), this is the 12485 * CTF type of the expression result. 12486 */ 12487 if ((strtab = dtrace_dof_sect(dof, 12488 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12489 goto err; 12490 12491 str = (char *)((uintptr_t)dof + 12492 (uintptr_t)strtab->dofs_offset); 12493 12494 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12495 if (str[i] == '\0') 12496 break; 12497 } 12498 12499 if (i >= strtab->dofs_size) { 12500 dtrace_dof_error(dof, "bogus format string"); 12501 goto err; 12502 } 12503 12504 if (i == desc->dofa_arg) { 12505 dtrace_dof_error(dof, "empty format string"); 12506 goto err; 12507 } 12508 12509 i -= desc->dofa_arg; 12510 fmt = kmem_alloc(i + 1, KM_SLEEP); 12511 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12512 arg = (uint64_t)(uintptr_t)fmt; 12513 } else { 12514 if (kind == DTRACEACT_PRINTA) { 12515 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12516 arg = 0; 12517 } else { 12518 arg = desc->dofa_arg; 12519 } 12520 } 12521 12522 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12523 desc->dofa_uarg, arg); 12524 12525 if (last != NULL) { 12526 last->dtad_next = act; 12527 } else { 12528 first = act; 12529 } 12530 12531 last = act; 12532 12533 if (desc->dofa_difo == DOF_SECIDX_NONE) 12534 continue; 12535 12536 if ((difosec = dtrace_dof_sect(dof, 12537 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12538 goto err; 12539 12540 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12541 12542 if (act->dtad_difo == NULL) 12543 goto err; 12544 } 12545 12546 ASSERT(first != NULL); 12547 return (first); 12548 12549 err: 12550 for (act = first; act != NULL; act = next) { 12551 next = act->dtad_next; 12552 dtrace_actdesc_release(act, vstate); 12553 } 12554 12555 return (NULL); 12556 } 12557 12558 static dtrace_ecbdesc_t * 12559 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12560 cred_t *cr) 12561 { 12562 dtrace_ecbdesc_t *ep; 12563 dof_ecbdesc_t *ecb; 12564 dtrace_probedesc_t *desc; 12565 dtrace_predicate_t *pred = NULL; 12566 12567 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12568 dtrace_dof_error(dof, "truncated ECB description"); 12569 return (NULL); 12570 } 12571 12572 if (sec->dofs_align != sizeof (uint64_t)) { 12573 dtrace_dof_error(dof, "bad alignment in ECB description"); 12574 return (NULL); 12575 } 12576 12577 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12578 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12579 12580 if (sec == NULL) 12581 return (NULL); 12582 12583 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12584 ep->dted_uarg = ecb->dofe_uarg; 12585 desc = &ep->dted_probe; 12586 12587 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12588 goto err; 12589 12590 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12591 if ((sec = dtrace_dof_sect(dof, 12592 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12593 goto err; 12594 12595 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12596 goto err; 12597 12598 ep->dted_pred.dtpdd_predicate = pred; 12599 } 12600 12601 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12602 if ((sec = dtrace_dof_sect(dof, 12603 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12604 goto err; 12605 12606 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12607 12608 if (ep->dted_action == NULL) 12609 goto err; 12610 } 12611 12612 return (ep); 12613 12614 err: 12615 if (pred != NULL) 12616 dtrace_predicate_release(pred, vstate); 12617 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12618 return (NULL); 12619 } 12620 12621 /* 12622 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12623 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12624 * site of any user SETX relocations to account for load object base address. 12625 * In the future, if we need other relocations, this function can be extended. 12626 */ 12627 static int 12628 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12629 { 12630 uintptr_t daddr = (uintptr_t)dof; 12631 dof_relohdr_t *dofr = 12632 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12633 dof_sec_t *ss, *rs, *ts; 12634 dof_relodesc_t *r; 12635 uint_t i, n; 12636 12637 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12638 sec->dofs_align != sizeof (dof_secidx_t)) { 12639 dtrace_dof_error(dof, "invalid relocation header"); 12640 return (-1); 12641 } 12642 12643 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12644 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12645 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12646 12647 if (ss == NULL || rs == NULL || ts == NULL) 12648 return (-1); /* dtrace_dof_error() has been called already */ 12649 12650 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12651 rs->dofs_align != sizeof (uint64_t)) { 12652 dtrace_dof_error(dof, "invalid relocation section"); 12653 return (-1); 12654 } 12655 12656 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12657 n = rs->dofs_size / rs->dofs_entsize; 12658 12659 for (i = 0; i < n; i++) { 12660 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12661 12662 switch (r->dofr_type) { 12663 case DOF_RELO_NONE: 12664 break; 12665 case DOF_RELO_SETX: 12666 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12667 sizeof (uint64_t) > ts->dofs_size) { 12668 dtrace_dof_error(dof, "bad relocation offset"); 12669 return (-1); 12670 } 12671 12672 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12673 dtrace_dof_error(dof, "misaligned setx relo"); 12674 return (-1); 12675 } 12676 12677 *(uint64_t *)taddr += ubase; 12678 break; 12679 default: 12680 dtrace_dof_error(dof, "invalid relocation type"); 12681 return (-1); 12682 } 12683 12684 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12685 } 12686 12687 return (0); 12688 } 12689 12690 /* 12691 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12692 * header: it should be at the front of a memory region that is at least 12693 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12694 * size. It need not be validated in any other way. 12695 */ 12696 static int 12697 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12698 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12699 { 12700 uint64_t len = dof->dofh_loadsz, seclen; 12701 uintptr_t daddr = (uintptr_t)dof; 12702 dtrace_ecbdesc_t *ep; 12703 dtrace_enabling_t *enab; 12704 uint_t i; 12705 12706 ASSERT(MUTEX_HELD(&dtrace_lock)); 12707 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12708 12709 /* 12710 * Check the DOF header identification bytes. In addition to checking 12711 * valid settings, we also verify that unused bits/bytes are zeroed so 12712 * we can use them later without fear of regressing existing binaries. 
12713 */ 12714 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12715 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12716 dtrace_dof_error(dof, "DOF magic string mismatch"); 12717 return (-1); 12718 } 12719 12720 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12721 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12722 dtrace_dof_error(dof, "DOF has invalid data model"); 12723 return (-1); 12724 } 12725 12726 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12727 dtrace_dof_error(dof, "DOF encoding mismatch"); 12728 return (-1); 12729 } 12730 12731 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12732 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12733 dtrace_dof_error(dof, "DOF version mismatch"); 12734 return (-1); 12735 } 12736 12737 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12738 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12739 return (-1); 12740 } 12741 12742 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12743 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12744 return (-1); 12745 } 12746 12747 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12748 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12749 return (-1); 12750 } 12751 12752 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12753 if (dof->dofh_ident[i] != 0) { 12754 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12755 return (-1); 12756 } 12757 } 12758 12759 if (dof->dofh_flags & ~DOF_FL_VALID) { 12760 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12761 return (-1); 12762 } 12763 12764 if (dof->dofh_secsize == 0) { 12765 dtrace_dof_error(dof, "zero section header size"); 12766 return (-1); 12767 } 12768 12769 /* 12770 * Check that the section headers don't exceed the amount of DOF 12771 * data. Note that we cast the section size and number of sections 12772 * to uint64_t's to prevent possible overflow in the multiplication. 12773 */ 12774 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12775 12776 if (dof->dofh_secoff > len || seclen > len || 12777 dof->dofh_secoff + seclen > len) { 12778 dtrace_dof_error(dof, "truncated section headers"); 12779 return (-1); 12780 } 12781 12782 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12783 dtrace_dof_error(dof, "misaligned section headers"); 12784 return (-1); 12785 } 12786 12787 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12788 dtrace_dof_error(dof, "misaligned section size"); 12789 return (-1); 12790 } 12791 12792 /* 12793 * Take an initial pass through the section headers to be sure that 12794 * the headers don't have stray offsets. If the 'noprobes' flag is 12795 * set, do not permit sections relating to providers, probes, or args. 
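 *
 * Note that the alignment check below relies on the usual power-of-two
 * identity: a nonzero alignment a is a power of two iff (a & (a - 1)) == 0.
 * For example, 8 & 7 == 0, so 8 is accepted, whereas a bogus alignment of
 * 6 yields 6 & 5 == 4 and is rejected.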
12796 */ 12797 for (i = 0; i < dof->dofh_secnum; i++) { 12798 dof_sec_t *sec = (dof_sec_t *)(daddr + 12799 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12800 12801 if (noprobes) { 12802 switch (sec->dofs_type) { 12803 case DOF_SECT_PROVIDER: 12804 case DOF_SECT_PROBES: 12805 case DOF_SECT_PRARGS: 12806 case DOF_SECT_PROFFS: 12807 dtrace_dof_error(dof, "illegal sections " 12808 "for enabling"); 12809 return (-1); 12810 } 12811 } 12812 12813 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12814 continue; /* just ignore non-loadable sections */ 12815 12816 if (sec->dofs_align & (sec->dofs_align - 1)) { 12817 dtrace_dof_error(dof, "bad section alignment"); 12818 return (-1); 12819 } 12820 12821 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12822 dtrace_dof_error(dof, "misaligned section"); 12823 return (-1); 12824 } 12825 12826 if (sec->dofs_offset > len || sec->dofs_size > len || 12827 sec->dofs_offset + sec->dofs_size > len) { 12828 dtrace_dof_error(dof, "corrupt section header"); 12829 return (-1); 12830 } 12831 12832 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12833 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12834 dtrace_dof_error(dof, "non-terminating string table"); 12835 return (-1); 12836 } 12837 } 12838 12839 /* 12840 * Take a second pass through the sections and locate and perform any 12841 * relocations that are present. We do this after the first pass to 12842 * be sure that all sections have had their headers validated. 12843 */ 12844 for (i = 0; i < dof->dofh_secnum; i++) { 12845 dof_sec_t *sec = (dof_sec_t *)(daddr + 12846 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12847 12848 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12849 continue; /* skip sections that are not loadable */ 12850 12851 switch (sec->dofs_type) { 12852 case DOF_SECT_URELHDR: 12853 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12854 return (-1); 12855 break; 12856 } 12857 } 12858 12859 if ((enab = *enabp) == NULL) 12860 enab = *enabp = dtrace_enabling_create(vstate); 12861 12862 for (i = 0; i < dof->dofh_secnum; i++) { 12863 dof_sec_t *sec = (dof_sec_t *)(daddr + 12864 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12865 12866 if (sec->dofs_type != DOF_SECT_ECBDESC) 12867 continue; 12868 12869 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12870 dtrace_enabling_destroy(enab); 12871 *enabp = NULL; 12872 return (-1); 12873 } 12874 12875 dtrace_enabling_add(enab, ep); 12876 } 12877 12878 return (0); 12879 } 12880 12881 /* 12882 * Process DOF for any options. This routine assumes that the DOF has been 12883 * at least processed by dtrace_dof_slurp(). 
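 *
 * Each DOF_SECT_OPTDESC section is simply an array of dof_optdesc_t
 * entries.  As a sketch (assuming the dof_optdesc_t layout in
 * <sys/dtrace.h>), an entry setting the principal buffer size to 4MB
 * would look like:
 *
 *	dof_optdesc_t opt = {
 *		.dofo_option = DTRACEOPT_BUFSIZE,
 *		.dofo_strtab = DOF_SECIDX_NONE,
 *		.dofo_value = 4 * 1024 * 1024
 *	};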
12884 */ 12885 static int 12886 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12887 { 12888 int i, rval; 12889 uint32_t entsize; 12890 size_t offs; 12891 dof_optdesc_t *desc; 12892 12893 for (i = 0; i < dof->dofh_secnum; i++) { 12894 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12895 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12896 12897 if (sec->dofs_type != DOF_SECT_OPTDESC) 12898 continue; 12899 12900 if (sec->dofs_align != sizeof (uint64_t)) { 12901 dtrace_dof_error(dof, "bad alignment in " 12902 "option description"); 12903 return (EINVAL); 12904 } 12905 12906 if ((entsize = sec->dofs_entsize) == 0) { 12907 dtrace_dof_error(dof, "zeroed option entry size"); 12908 return (EINVAL); 12909 } 12910 12911 if (entsize < sizeof (dof_optdesc_t)) { 12912 dtrace_dof_error(dof, "bad option entry size"); 12913 return (EINVAL); 12914 } 12915 12916 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12917 desc = (dof_optdesc_t *)((uintptr_t)dof + 12918 (uintptr_t)sec->dofs_offset + offs); 12919 12920 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12921 dtrace_dof_error(dof, "non-zero option string"); 12922 return (EINVAL); 12923 } 12924 12925 if (desc->dofo_value == DTRACEOPT_UNSET) { 12926 dtrace_dof_error(dof, "unset option"); 12927 return (EINVAL); 12928 } 12929 12930 if ((rval = dtrace_state_option(state, 12931 desc->dofo_option, desc->dofo_value)) != 0) { 12932 dtrace_dof_error(dof, "rejected option"); 12933 return (rval); 12934 } 12935 } 12936 } 12937 12938 return (0); 12939 } 12940 12941 /* 12942 * DTrace Consumer State Functions 12943 */ 12944 static int 12945 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12946 { 12947 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12948 void *base; 12949 uintptr_t limit; 12950 dtrace_dynvar_t *dvar, *next, *start; 12951 int i; 12952 12953 ASSERT(MUTEX_HELD(&dtrace_lock)); 12954 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12955 12956 bzero(dstate, sizeof (dtrace_dstate_t)); 12957 12958 if ((dstate->dtds_chunksize = chunksize) == 0) 12959 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12960 12961 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12962 size = min; 12963 12964 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12965 return (ENOMEM); 12966 12967 dstate->dtds_size = size; 12968 dstate->dtds_base = base; 12969 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12970 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12971 12972 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12973 12974 if (hashsize != 1 && (hashsize & 1)) 12975 hashsize--; 12976 12977 dstate->dtds_hashsize = hashsize; 12978 dstate->dtds_hash = dstate->dtds_base; 12979 12980 /* 12981 * Set all of our hash buckets to point to the single sink, and (if 12982 * it hasn't already been set), set the sink's hash value to be the 12983 * sink sentinel value. The sink is needed for dynamic variable 12984 * lookups to know that they have iterated over an entire, valid hash 12985 * chain. 12986 */ 12987 for (i = 0; i < hashsize; i++) 12988 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12989 12990 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12991 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12992 12993 /* 12994 * Determine number of active CPUs. Divide free list evenly among 12995 * active CPUs. 
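 *
 * Concretely, with the chunk area running from start (just past the
 * hash table) to limit = base + size, each CPU receives
 *
 *	maxper = (((limit - start) / NCPU) / chunksize) * chunksize
 *
 * bytes -- an equal share rounded down to a whole number of chunks;
 * any remainder is given to the last CPU (see the loop below).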
12996 */ 12997 start = (dtrace_dynvar_t *) 12998 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12999 limit = (uintptr_t)base + size; 13000 13001 maxper = (limit - (uintptr_t)start) / NCPU; 13002 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 13003 13004 #if !defined(sun) 13005 CPU_FOREACH(i) { 13006 #else 13007 for (i = 0; i < NCPU; i++) { 13008 #endif 13009 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 13010 13011 /* 13012 * If we don't even have enough chunks to make it once through 13013 * NCPUs, we're just going to allocate everything to the first 13014 * CPU. And if we're on the last CPU, we're going to allocate 13015 * whatever is left over. In either case, we set the limit to 13016 * be the limit of the dynamic variable space. 13017 */ 13018 if (maxper == 0 || i == NCPU - 1) { 13019 limit = (uintptr_t)base + size; 13020 start = NULL; 13021 } else { 13022 limit = (uintptr_t)start + maxper; 13023 start = (dtrace_dynvar_t *)limit; 13024 } 13025 13026 ASSERT(limit <= (uintptr_t)base + size); 13027 13028 for (;;) { 13029 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 13030 dstate->dtds_chunksize); 13031 13032 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 13033 break; 13034 13035 dvar->dtdv_next = next; 13036 dvar = next; 13037 } 13038 13039 if (maxper == 0) 13040 break; 13041 } 13042 13043 return (0); 13044 } 13045 13046 static void 13047 dtrace_dstate_fini(dtrace_dstate_t *dstate) 13048 { 13049 ASSERT(MUTEX_HELD(&cpu_lock)); 13050 13051 if (dstate->dtds_base == NULL) 13052 return; 13053 13054 kmem_free(dstate->dtds_base, dstate->dtds_size); 13055 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 13056 } 13057 13058 static void 13059 dtrace_vstate_fini(dtrace_vstate_t *vstate) 13060 { 13061 /* 13062 * Logical XOR, where are you? 13063 */ 13064 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 13065 13066 if (vstate->dtvs_nglobals > 0) { 13067 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 13068 sizeof (dtrace_statvar_t *)); 13069 } 13070 13071 if (vstate->dtvs_ntlocals > 0) { 13072 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 13073 sizeof (dtrace_difv_t)); 13074 } 13075 13076 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13077 13078 if (vstate->dtvs_nlocals > 0) { 13079 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13080 sizeof (dtrace_statvar_t *)); 13081 } 13082 } 13083 13084 #if defined(sun) 13085 static void 13086 dtrace_state_clean(dtrace_state_t *state) 13087 { 13088 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13089 return; 13090 13091 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13092 dtrace_speculation_clean(state); 13093 } 13094 13095 static void 13096 dtrace_state_deadman(dtrace_state_t *state) 13097 { 13098 hrtime_t now; 13099 13100 dtrace_sync(); 13101 13102 now = dtrace_gethrtime(); 13103 13104 if (state != dtrace_anon.dta_state && 13105 now - state->dts_laststatus >= dtrace_deadman_user) 13106 return; 13107 13108 /* 13109 * We must be sure that dts_alive never appears to be less than the 13110 * value upon entry to dtrace_state_deadman(), and because we lack a 13111 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13112 * store INT64_MAX to it, followed by a memory barrier, followed by 13113 * the new value. This assures that dts_alive never appears to be 13114 * less than its true value, regardless of the order in which the 13115 * stores to the underlying storage are issued. 
13116 */ 13117 state->dts_alive = INT64_MAX; 13118 dtrace_membar_producer(); 13119 state->dts_alive = now; 13120 } 13121 #else 13122 static void 13123 dtrace_state_clean(void *arg) 13124 { 13125 dtrace_state_t *state = arg; 13126 dtrace_optval_t *opt = state->dts_options; 13127 13128 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13129 return; 13130 13131 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13132 dtrace_speculation_clean(state); 13133 13134 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13135 dtrace_state_clean, state); 13136 } 13137 13138 static void 13139 dtrace_state_deadman(void *arg) 13140 { 13141 dtrace_state_t *state = arg; 13142 hrtime_t now; 13143 13144 dtrace_sync(); 13145 13146 dtrace_debug_output(); 13147 13148 now = dtrace_gethrtime(); 13149 13150 if (state != dtrace_anon.dta_state && 13151 now - state->dts_laststatus >= dtrace_deadman_user) 13152 return; 13153 13154 /* 13155 * We must be sure that dts_alive never appears to be less than the 13156 * value upon entry to dtrace_state_deadman(), and because we lack a 13157 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13158 * store INT64_MAX to it, followed by a memory barrier, followed by 13159 * the new value. This assures that dts_alive never appears to be 13160 * less than its true value, regardless of the order in which the 13161 * stores to the underlying storage are issued. 13162 */ 13163 state->dts_alive = INT64_MAX; 13164 dtrace_membar_producer(); 13165 state->dts_alive = now; 13166 13167 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13168 dtrace_state_deadman, state); 13169 } 13170 #endif 13171 13172 static dtrace_state_t * 13173 #if defined(sun) 13174 dtrace_state_create(dev_t *devp, cred_t *cr) 13175 #else 13176 dtrace_state_create(struct cdev *dev) 13177 #endif 13178 { 13179 #if defined(sun) 13180 minor_t minor; 13181 major_t major; 13182 #else 13183 cred_t *cr = NULL; 13184 int m = 0; 13185 #endif 13186 char c[30]; 13187 dtrace_state_t *state; 13188 dtrace_optval_t *opt; 13189 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13190 13191 ASSERT(MUTEX_HELD(&dtrace_lock)); 13192 ASSERT(MUTEX_HELD(&cpu_lock)); 13193 13194 #if defined(sun) 13195 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13196 VM_BESTFIT | VM_SLEEP); 13197 13198 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13199 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13200 return (NULL); 13201 } 13202 13203 state = ddi_get_soft_state(dtrace_softstate, minor); 13204 #else 13205 if (dev != NULL) { 13206 cr = dev->si_cred; 13207 m = dev2unit(dev); 13208 } 13209 13210 /* Allocate memory for the state. */ 13211 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13212 #endif 13213 13214 state->dts_epid = DTRACE_EPIDNONE + 1; 13215 13216 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13217 #if defined(sun) 13218 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13219 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13220 13221 if (devp != NULL) { 13222 major = getemajor(*devp); 13223 } else { 13224 major = ddi_driver_major(dtrace_devi); 13225 } 13226 13227 state->dts_dev = makedevice(major, minor); 13228 13229 if (devp != NULL) 13230 *devp = state->dts_dev; 13231 #else 13232 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13233 state->dts_dev = dev; 13234 #endif 13235 13236 /* 13237 * We allocate NCPU buffers. 
On the one hand, this can be quite 13238 * a bit of memory per instance (nearly 36K on a Starcat). On the 13239 * other hand, it saves an additional memory reference in the probe 13240 * path. 13241 */ 13242 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13243 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13244 13245 #if defined(sun) 13246 state->dts_cleaner = CYCLIC_NONE; 13247 state->dts_deadman = CYCLIC_NONE; 13248 #else 13249 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13250 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13251 #endif 13252 state->dts_vstate.dtvs_state = state; 13253 13254 for (i = 0; i < DTRACEOPT_MAX; i++) 13255 state->dts_options[i] = DTRACEOPT_UNSET; 13256 13257 /* 13258 * Set the default options. 13259 */ 13260 opt = state->dts_options; 13261 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13262 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13263 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13264 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13265 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13266 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13267 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13268 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13269 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13270 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13271 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13272 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13273 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13274 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13275 13276 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13277 13278 /* 13279 * Depending on the user credentials, we set flag bits which alter probe 13280 * visibility or the amount of destructiveness allowed. In the case of 13281 * actual anonymous tracing, or the possession of all privileges, all of 13282 * the normal checks are bypassed. 13283 */ 13284 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13285 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13286 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13287 } else { 13288 /* 13289 * Set up the credentials for this instantiation. We take a 13290 * hold on the credential to prevent it from disappearing on 13291 * us; this in turn prevents the zone_t referenced by this 13292 * credential from disappearing. This means that we can 13293 * examine the credential and the zone from probe context. 13294 */ 13295 crhold(cr); 13296 state->dts_cred.dcr_cred = cr; 13297 13298 /* 13299 * CRA_PROC means "we have *some* privilege for dtrace" and 13300 * unlocks the use of variables like pid, zonename, etc. 13301 */ 13302 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13303 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13304 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13305 } 13306 13307 /* 13308 * dtrace_user allows use of syscall and profile providers. 13309 * If the user also has proc_owner and/or proc_zone, we 13310 * extend the scope to include additional visibility and 13311 * destructive power. 
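 *
 * To summarize the cases handled below (a sketch only; the code is
 * authoritative):
 *
 *	dtrace_user + proc_owner -> DTRACE_CRV_ALLPROC visibility,
 *	    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
 *	dtrace_user + proc_zone  -> DTRACE_CRV_ALLZONE visibility,
 *	    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE
 *	all privs in this zone   -> DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG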
13312 */ 13313 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13314 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13315 state->dts_cred.dcr_visible |= 13316 DTRACE_CRV_ALLPROC; 13317 13318 state->dts_cred.dcr_action |= 13319 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13320 } 13321 13322 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13323 state->dts_cred.dcr_visible |= 13324 DTRACE_CRV_ALLZONE; 13325 13326 state->dts_cred.dcr_action |= 13327 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13328 } 13329 13330 /* 13331 * If we have all privs in whatever zone this is, 13332 * we can do destructive things to processes which 13333 * have altered credentials. 13334 */ 13335 #if defined(sun) 13336 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13337 cr->cr_zone->zone_privset)) { 13338 state->dts_cred.dcr_action |= 13339 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13340 } 13341 #endif 13342 } 13343 13344 /* 13345 * Holding the dtrace_kernel privilege also implies that 13346 * the user has the dtrace_user privilege from a visibility 13347 * perspective. But without further privileges, some 13348 * destructive actions are not available. 13349 */ 13350 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13351 /* 13352 * Make all probes in all zones visible. However, 13353 * this doesn't mean that all actions become available 13354 * to all zones. 13355 */ 13356 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13357 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13358 13359 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13360 DTRACE_CRA_PROC; 13361 /* 13362 * Holding proc_owner means that destructive actions 13363 * for *this* zone are allowed. 13364 */ 13365 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13366 state->dts_cred.dcr_action |= 13367 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13368 13369 /* 13370 * Holding proc_zone means that destructive actions 13371 * for this user/group ID in all zones is allowed. 13372 */ 13373 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13374 state->dts_cred.dcr_action |= 13375 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13376 13377 #if defined(sun) 13378 /* 13379 * If we have all privs in whatever zone this is, 13380 * we can do destructive things to processes which 13381 * have altered credentials. 13382 */ 13383 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13384 cr->cr_zone->zone_privset)) { 13385 state->dts_cred.dcr_action |= 13386 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13387 } 13388 #endif 13389 } 13390 13391 /* 13392 * Holding the dtrace_proc privilege gives control over fasttrap 13393 * and pid providers. We need to grant wider destructive 13394 * privileges in the event that the user has proc_owner and/or 13395 * proc_zone. 
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;

			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
		}
	}

	return (state);
}

static int
dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
{
	dtrace_optval_t *opt = state->dts_options, size;
	processorid_t cpu = 0;
	int flags = 0, rval;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(which < DTRACEOPT_MAX);
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
	    (state == dtrace_anon.dta_state &&
	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));

	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
		return (0);

	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
		cpu = opt[DTRACEOPT_CPU];

	if (which == DTRACEOPT_SPECSIZE)
		flags |= DTRACEBUF_NOSWITCH;

	if (which == DTRACEOPT_BUFSIZE) {
		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
			flags |= DTRACEBUF_RING;

		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
			flags |= DTRACEBUF_FILL;

		if (state != dtrace_anon.dta_state ||
		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
			flags |= DTRACEBUF_INACTIVE;
	}

	for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
		/*
		 * The size must be 8-byte aligned.  If the size is not
		 * 8-byte aligned, drop it down by the difference.
		 */
		if (size & (sizeof (uint64_t) - 1))
			size -= size & (sizeof (uint64_t) - 1);

		if (size < state->dts_reserve) {
			/*
			 * Buffers must always be large enough to accommodate
			 * their prereserved space.  We return E2BIG instead
			 * of ENOMEM in this case to allow for user-level
			 * software to differentiate the cases.
			 */
			return (E2BIG);
		}

		rval = dtrace_buffer_alloc(buf, size, flags, cpu);

		if (rval != ENOMEM) {
			opt[which] = size;
			return (rval);
		}

		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
			return (rval);
	}

	return (ENOMEM);
}

static int
dtrace_state_buffers(dtrace_state_t *state)
{
	dtrace_speculation_t *spec = state->dts_speculations;
	int rval, i;

	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
	    DTRACEOPT_BUFSIZE)) != 0)
		return (rval);

	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
	    DTRACEOPT_AGGSIZE)) != 0)
		return (rval);

	for (i = 0; i < state->dts_nspeculations; i++) {
		if ((rval = dtrace_state_buffer(state,
		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
			return (rval);
	}

	return (0);
}

static void
dtrace_state_prereserve(dtrace_state_t *state)
{
	dtrace_ecb_t *ecb;
	dtrace_probe_t *probe;

	state->dts_reserve = 0;

	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
		return;

	/*
	 * If our buffer policy is a "fill" buffer policy, we need to set the
	 * prereserved space to be the space required by the END probes.
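	 *
	 * The reserve is just the sum, over every END ECB that belongs to
	 * this state, of that ECB's needed size plus its alignment.  For
	 * example (illustrative numbers only), two END ECBs needing 64 and
	 * 32 bytes, each with 8-byte alignment, would prereserve
	 * (64 + 8) + (32 + 8) = 112 bytes.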
	 */
	probe = dtrace_probes[dtrace_probeid_end - 1];
	ASSERT(probe != NULL);

	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
		if (ecb->dte_state != state)
			continue;

		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
	}
}

static int
dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
{
	dtrace_optval_t *opt = state->dts_options, sz, nspec;
	dtrace_speculation_t *spec;
	dtrace_buffer_t *buf;
#if defined(sun)
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif
	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
	dtrace_icookie_t cookie;

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_lock);

	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
		rval = EBUSY;
		goto out;
	}

	/*
	 * Before we can perform any checks, we must prime all of the
	 * retained enablings that correspond to this state.
	 */
	dtrace_enabling_prime(state);

	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
		rval = EACCES;
		goto out;
	}

	dtrace_state_prereserve(state);

	/*
	 * Next, we try to allocate our speculations.  We do not
	 * automatically resize the number of speculations; if this fails,
	 * we will fail the operation.
	 */
	nspec = opt[DTRACEOPT_NSPEC];
	ASSERT(nspec != DTRACEOPT_UNSET);

	if (nspec > INT_MAX) {
		rval = ENOMEM;
		goto out;
	}

	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);

	if (spec == NULL) {
		rval = ENOMEM;
		goto out;
	}

	state->dts_speculations = spec;
	state->dts_nspeculations = (int)nspec;

	for (i = 0; i < nspec; i++) {
		if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
			rval = ENOMEM;
			goto err;
		}

		spec[i].dtsp_buffer = buf;
	}

	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
		if (dtrace_anon.dta_state == NULL) {
			rval = ENOENT;
			goto out;
		}

		if (state->dts_necbs != 0) {
			rval = EALREADY;
			goto out;
		}

		state->dts_anon = dtrace_anon_grab();
		ASSERT(state->dts_anon != NULL);
		state = state->dts_anon;

		/*
		 * We want "grabanon" to be set in the grabbed state, so we'll
		 * copy that option value from the grabbing state into the
		 * grabbed state.
		 */
		state->dts_options[DTRACEOPT_GRABANON] =
		    opt[DTRACEOPT_GRABANON];

		*cpu = dtrace_anon.dta_beganon;

		/*
		 * If the anonymous state is active (as it almost certainly
		 * is if the anonymous enabling ultimately matched anything),
		 * we don't allow any further option processing -- but we
		 * don't return failure.
		 */
		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
			goto out;
	}

	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
	    opt[DTRACEOPT_AGGSIZE] != 0) {
		if (state->dts_aggregations == NULL) {
			/*
			 * We're not going to create an aggregation buffer
			 * because we don't have any ECBs that contain
			 * aggregations -- set this option to 0.
			 */
			opt[DTRACEOPT_AGGSIZE] = 0;
		} else {
			/*
			 * If we have an aggregation buffer, we must also have
			 * a buffer to use as scratch.
13641 */ 13642 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13643 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13644 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13645 } 13646 } 13647 } 13648 13649 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13650 opt[DTRACEOPT_SPECSIZE] != 0) { 13651 if (!state->dts_speculates) { 13652 /* 13653 * We're not going to create speculation buffers 13654 * because we don't have any ECBs that actually 13655 * speculate -- set the speculation size to 0. 13656 */ 13657 opt[DTRACEOPT_SPECSIZE] = 0; 13658 } 13659 } 13660 13661 /* 13662 * The bare minimum size for any buffer that we're actually going to 13663 * do anything to is sizeof (uint64_t). 13664 */ 13665 sz = sizeof (uint64_t); 13666 13667 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13668 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13669 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13670 /* 13671 * A buffer size has been explicitly set to 0 (or to a size 13672 * that will be adjusted to 0) and we need the space -- we 13673 * need to return failure. We return ENOSPC to differentiate 13674 * it from failing to allocate a buffer due to failure to meet 13675 * the reserve (for which we return E2BIG). 13676 */ 13677 rval = ENOSPC; 13678 goto out; 13679 } 13680 13681 if ((rval = dtrace_state_buffers(state)) != 0) 13682 goto err; 13683 13684 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13685 sz = dtrace_dstate_defsize; 13686 13687 do { 13688 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13689 13690 if (rval == 0) 13691 break; 13692 13693 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13694 goto err; 13695 } while (sz >>= 1); 13696 13697 opt[DTRACEOPT_DYNVARSIZE] = sz; 13698 13699 if (rval != 0) 13700 goto err; 13701 13702 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13703 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13704 13705 if (opt[DTRACEOPT_CLEANRATE] == 0) 13706 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13707 13708 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13709 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13710 13711 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13712 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13713 13714 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13715 #if defined(sun) 13716 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13717 hdlr.cyh_arg = state; 13718 hdlr.cyh_level = CY_LOW_LEVEL; 13719 13720 when.cyt_when = 0; 13721 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13722 13723 state->dts_cleaner = cyclic_add(&hdlr, &when); 13724 13725 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13726 hdlr.cyh_arg = state; 13727 hdlr.cyh_level = CY_LOW_LEVEL; 13728 13729 when.cyt_when = 0; 13730 when.cyt_interval = dtrace_deadman_interval; 13731 13732 state->dts_deadman = cyclic_add(&hdlr, &when); 13733 #else 13734 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13735 dtrace_state_clean, state); 13736 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13737 dtrace_state_deadman, state); 13738 #endif 13739 13740 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13741 13742 /* 13743 * Now it's time to actually fire the BEGIN probe. We need to disable 13744 * interrupts here both to record the CPU on which we fired the BEGIN 13745 * probe (the data from this CPU will be processed first at user 13746 * level) and to manually activate the buffer for this CPU. 
	 */
	cookie = dtrace_interrupt_disable();
	*cpu = curcpu;
	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;

	dtrace_probe(dtrace_probeid_begin,
	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
	dtrace_interrupt_enable(cookie);
	/*
	 * We may have had an exit action from a BEGIN probe; only change our
	 * state to ACTIVE if we're still in WARMUP.
	 */
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);

	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;

	/*
	 * Regardless of whether we're now in ACTIVE or DRAINING, we want
	 * each CPU to transition its principal buffer out of the INACTIVE
	 * state.  Doing this assures that no CPU will suddenly begin
	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
	 * atomically transition from processing none of a state's ECBs to
	 * processing all of them.
	 */
	dtrace_xcall(DTRACE_CPUALL,
	    (dtrace_xcall_t)dtrace_buffer_activate, state);
	goto out;

err:
	dtrace_buffer_free(state->dts_buffer);
	dtrace_buffer_free(state->dts_aggbuffer);

	if ((nspec = state->dts_nspeculations) == 0) {
		ASSERT(state->dts_speculations == NULL);
		goto out;
	}

	spec = state->dts_speculations;
	ASSERT(spec != NULL);

	for (i = 0; i < state->dts_nspeculations; i++) {
		if ((buf = spec[i].dtsp_buffer) == NULL)
			break;

		dtrace_buffer_free(buf);
		kmem_free(buf, bufsize);
	}

	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
	state->dts_nspeculations = 0;
	state->dts_speculations = NULL;

out:
	mutex_exit(&dtrace_lock);
	mutex_exit(&cpu_lock);

	return (rval);
}

static int
dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
{
	dtrace_icookie_t cookie;

	ASSERT(MUTEX_HELD(&dtrace_lock));

	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
		return (EINVAL);

	/*
	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a
	 * sync to be sure that every CPU has seen it.  See below for the
	 * details on why this is done.
	 */
	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
	dtrace_sync();

	/*
	 * By this point, it is impossible for any CPU to be still processing
	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
	 * iff we're in the END probe.
	 */
	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
	dtrace_sync();
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);

	/*
	 * Finally, we can release the reserve and call the END probe.  We
	 * disable interrupts across calling the END probe to allow us to
	 * return the CPU on which we actually called the END probe.  This
	 * allows user-land to be sure that this CPU's principal buffer is
	 * processed last.
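	 *
	 * The overall stop sequence is thus (each dtrace_sync() assures
	 * that every CPU has observed the preceding store before we
	 * proceed):
	 *
	 *	DRAINING -> sync -> COOLDOWN -> sync -> END probe fires
	 *	    -> STOPPED -> sync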
	 */
	state->dts_reserve = 0;

	cookie = dtrace_interrupt_disable();
	*cpu = curcpu;
	dtrace_probe(dtrace_probeid_end,
	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
	dtrace_interrupt_enable(cookie);

	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
	dtrace_sync();

	return (0);
}

static int
dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
    dtrace_optval_t val)
{
	ASSERT(MUTEX_HELD(&dtrace_lock));

	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
		return (EBUSY);

	if (option >= DTRACEOPT_MAX)
		return (EINVAL);

	if (option != DTRACEOPT_CPU && val < 0)
		return (EINVAL);

	switch (option) {
	case DTRACEOPT_DESTRUCTIVE:
		if (dtrace_destructive_disallow)
			return (EACCES);

		state->dts_cred.dcr_destructive = 1;
		break;

	case DTRACEOPT_BUFSIZE:
	case DTRACEOPT_DYNVARSIZE:
	case DTRACEOPT_AGGSIZE:
	case DTRACEOPT_SPECSIZE:
	case DTRACEOPT_STRSIZE:
		if (val < 0)
			return (EINVAL);

		if (val >= LONG_MAX) {
			/*
			 * If this is an otherwise negative value, set it to
			 * the highest multiple of 128m less than LONG_MAX.
			 * Technically, we're adjusting the size without
			 * regard to the buffer resizing policy, but in fact,
			 * this has no effect -- if we set the buffer size to
			 * ~LONG_MAX and the buffer policy is ultimately set
			 * to be "manual", the buffer allocation is guaranteed
			 * to fail, if only because the allocation requires
			 * two buffers.  (We set the size to the highest
			 * multiple of 128m because it ensures that the size
			 * will remain a multiple of a megabyte when
			 * repeatedly halved -- all the way down to 15m.)
			 */
			val = LONG_MAX - (1 << 27) + 1;
		}
	}

	state->dts_options[option] = val;

	return (0);
}

static void
dtrace_state_destroy(dtrace_state_t *state)
{
	dtrace_ecb_t *ecb;
	dtrace_vstate_t *vstate = &state->dts_vstate;
#if defined(sun)
	minor_t minor = getminor(state->dts_dev);
#endif
	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
	dtrace_speculation_t *spec = state->dts_speculations;
	int nspec = state->dts_nspeculations;
	uint32_t match;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * First, retract any retained enablings for this state.
	 */
	dtrace_enabling_retract(state);
	ASSERT(state->dts_nretained == 0);

	if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
	    state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
		/*
		 * We have managed to come into dtrace_state_destroy() on a
		 * hot enabling -- almost certainly because of a disorderly
		 * shutdown of a consumer.  (That is, a consumer that is
		 * exiting without having called dtrace_stop().)  In this
		 * case, we're going to set our activity to be KILLED, and
		 * then issue a sync to be sure that everyone is out of
		 * probe context before we start blowing away ECBs.
		 */
		state->dts_activity = DTRACE_ACTIVITY_KILLED;
		dtrace_sync();
	}

	/*
	 * Release the credential hold we took in dtrace_state_create().
13955 */ 13956 if (state->dts_cred.dcr_cred != NULL) 13957 crfree(state->dts_cred.dcr_cred); 13958 13959 /* 13960 * Now we can safely disable and destroy any enabled probes. Because 13961 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13962 * (especially if they're all enabled), we take two passes through the 13963 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13964 * in the second we disable whatever is left over. 13965 */ 13966 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13967 for (i = 0; i < state->dts_necbs; i++) { 13968 if ((ecb = state->dts_ecbs[i]) == NULL) 13969 continue; 13970 13971 if (match && ecb->dte_probe != NULL) { 13972 dtrace_probe_t *probe = ecb->dte_probe; 13973 dtrace_provider_t *prov = probe->dtpr_provider; 13974 13975 if (!(prov->dtpv_priv.dtpp_flags & match)) 13976 continue; 13977 } 13978 13979 dtrace_ecb_disable(ecb); 13980 dtrace_ecb_destroy(ecb); 13981 } 13982 13983 if (!match) 13984 break; 13985 } 13986 13987 /* 13988 * Before we free the buffers, perform one more sync to assure that 13989 * every CPU is out of probe context. 13990 */ 13991 dtrace_sync(); 13992 13993 dtrace_buffer_free(state->dts_buffer); 13994 dtrace_buffer_free(state->dts_aggbuffer); 13995 13996 for (i = 0; i < nspec; i++) 13997 dtrace_buffer_free(spec[i].dtsp_buffer); 13998 13999 #if defined(sun) 14000 if (state->dts_cleaner != CYCLIC_NONE) 14001 cyclic_remove(state->dts_cleaner); 14002 14003 if (state->dts_deadman != CYCLIC_NONE) 14004 cyclic_remove(state->dts_deadman); 14005 #else 14006 callout_stop(&state->dts_cleaner); 14007 callout_drain(&state->dts_cleaner); 14008 callout_stop(&state->dts_deadman); 14009 callout_drain(&state->dts_deadman); 14010 #endif 14011 14012 dtrace_dstate_fini(&vstate->dtvs_dynvars); 14013 dtrace_vstate_fini(vstate); 14014 if (state->dts_ecbs != NULL) 14015 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 14016 14017 if (state->dts_aggregations != NULL) { 14018 #ifdef DEBUG 14019 for (i = 0; i < state->dts_naggregations; i++) 14020 ASSERT(state->dts_aggregations[i] == NULL); 14021 #endif 14022 ASSERT(state->dts_naggregations > 0); 14023 kmem_free(state->dts_aggregations, 14024 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 14025 } 14026 14027 kmem_free(state->dts_buffer, bufsize); 14028 kmem_free(state->dts_aggbuffer, bufsize); 14029 14030 for (i = 0; i < nspec; i++) 14031 kmem_free(spec[i].dtsp_buffer, bufsize); 14032 14033 if (spec != NULL) 14034 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 14035 14036 dtrace_format_destroy(state); 14037 14038 if (state->dts_aggid_arena != NULL) { 14039 #if defined(sun) 14040 vmem_destroy(state->dts_aggid_arena); 14041 #else 14042 delete_unrhdr(state->dts_aggid_arena); 14043 #endif 14044 state->dts_aggid_arena = NULL; 14045 } 14046 #if defined(sun) 14047 ddi_soft_state_free(dtrace_softstate, minor); 14048 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14049 #endif 14050 } 14051 14052 /* 14053 * DTrace Anonymous Enabling Functions 14054 */ 14055 static dtrace_state_t * 14056 dtrace_anon_grab(void) 14057 { 14058 dtrace_state_t *state; 14059 14060 ASSERT(MUTEX_HELD(&dtrace_lock)); 14061 14062 if ((state = dtrace_anon.dta_state) == NULL) { 14063 ASSERT(dtrace_anon.dta_enabling == NULL); 14064 return (NULL); 14065 } 14066 14067 ASSERT(dtrace_anon.dta_enabling != NULL); 14068 ASSERT(dtrace_retained != NULL); 14069 14070 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 14071 dtrace_anon.dta_enabling = NULL; 14072 
dtrace_anon.dta_state = NULL; 14073 14074 return (state); 14075 } 14076 14077 static void 14078 dtrace_anon_property(void) 14079 { 14080 int i, rv; 14081 dtrace_state_t *state; 14082 dof_hdr_t *dof; 14083 char c[32]; /* enough for "dof-data-" + digits */ 14084 14085 ASSERT(MUTEX_HELD(&dtrace_lock)); 14086 ASSERT(MUTEX_HELD(&cpu_lock)); 14087 14088 for (i = 0; ; i++) { 14089 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14090 14091 dtrace_err_verbose = 1; 14092 14093 if ((dof = dtrace_dof_property(c)) == NULL) { 14094 dtrace_err_verbose = 0; 14095 break; 14096 } 14097 14098 #if defined(sun) 14099 /* 14100 * We want to create anonymous state, so we need to transition 14101 * the kernel debugger to indicate that DTrace is active. If 14102 * this fails (e.g. because the debugger has modified text in 14103 * some way), we won't continue with the processing. 14104 */ 14105 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14106 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14107 "enabling ignored."); 14108 dtrace_dof_destroy(dof); 14109 break; 14110 } 14111 #endif 14112 14113 /* 14114 * If we haven't allocated an anonymous state, we'll do so now. 14115 */ 14116 if ((state = dtrace_anon.dta_state) == NULL) { 14117 #if defined(sun) 14118 state = dtrace_state_create(NULL, NULL); 14119 #else 14120 state = dtrace_state_create(NULL); 14121 #endif 14122 dtrace_anon.dta_state = state; 14123 14124 if (state == NULL) { 14125 /* 14126 * This basically shouldn't happen: the only 14127 * failure mode from dtrace_state_create() is a 14128 * failure of ddi_soft_state_zalloc() that 14129 * itself should never happen. Still, the 14130 * interface allows for a failure mode, and 14131 * we want to fail as gracefully as possible: 14132 * we'll emit an error message and cease 14133 * processing anonymous state in this case. 14134 */ 14135 cmn_err(CE_WARN, "failed to create " 14136 "anonymous state"); 14137 dtrace_dof_destroy(dof); 14138 break; 14139 } 14140 } 14141 14142 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14143 &dtrace_anon.dta_enabling, 0, B_TRUE); 14144 14145 if (rv == 0) 14146 rv = dtrace_dof_options(dof, state); 14147 14148 dtrace_err_verbose = 0; 14149 dtrace_dof_destroy(dof); 14150 14151 if (rv != 0) { 14152 /* 14153 * This is malformed DOF; chuck any anonymous state 14154 * that we created. 14155 */ 14156 ASSERT(dtrace_anon.dta_enabling == NULL); 14157 dtrace_state_destroy(state); 14158 dtrace_anon.dta_state = NULL; 14159 break; 14160 } 14161 14162 ASSERT(dtrace_anon.dta_enabling != NULL); 14163 } 14164 14165 if (dtrace_anon.dta_enabling != NULL) { 14166 int rval; 14167 14168 /* 14169 * dtrace_enabling_retain() can only fail because we are 14170 * trying to retain more enablings than are allowed -- but 14171 * we only have one anonymous enabling, and we are guaranteed 14172 * to be allowed at least one retained enabling; we assert 14173 * that dtrace_enabling_retain() returns success. 
14174 */ 14175 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14176 ASSERT(rval == 0); 14177 14178 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14179 } 14180 } 14181 14182 /* 14183 * DTrace Helper Functions 14184 */ 14185 static void 14186 dtrace_helper_trace(dtrace_helper_action_t *helper, 14187 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14188 { 14189 uint32_t size, next, nnext, i; 14190 dtrace_helptrace_t *ent; 14191 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14192 14193 if (!dtrace_helptrace_enabled) 14194 return; 14195 14196 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14197 14198 /* 14199 * What would a tracing framework be without its own tracing 14200 * framework? (Well, a hell of a lot simpler, for starters...) 14201 */ 14202 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14203 sizeof (uint64_t) - sizeof (uint64_t); 14204 14205 /* 14206 * Iterate until we can allocate a slot in the trace buffer. 14207 */ 14208 do { 14209 next = dtrace_helptrace_next; 14210 14211 if (next + size < dtrace_helptrace_bufsize) { 14212 nnext = next + size; 14213 } else { 14214 nnext = size; 14215 } 14216 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14217 14218 /* 14219 * We have our slot; fill it in. 14220 */ 14221 if (nnext == size) 14222 next = 0; 14223 14224 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14225 ent->dtht_helper = helper; 14226 ent->dtht_where = where; 14227 ent->dtht_nlocals = vstate->dtvs_nlocals; 14228 14229 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14230 mstate->dtms_fltoffs : -1; 14231 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14232 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14233 14234 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14235 dtrace_statvar_t *svar; 14236 14237 if ((svar = vstate->dtvs_locals[i]) == NULL) 14238 continue; 14239 14240 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14241 ent->dtht_locals[i] = 14242 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14243 } 14244 } 14245 14246 static uint64_t 14247 dtrace_helper(int which, dtrace_mstate_t *mstate, 14248 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14249 { 14250 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14251 uint64_t sarg0 = mstate->dtms_arg[0]; 14252 uint64_t sarg1 = mstate->dtms_arg[1]; 14253 uint64_t rval = 0; 14254 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14255 dtrace_helper_action_t *helper; 14256 dtrace_vstate_t *vstate; 14257 dtrace_difo_t *pred; 14258 int i, trace = dtrace_helptrace_enabled; 14259 14260 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14261 14262 if (helpers == NULL) 14263 return (0); 14264 14265 if ((helper = helpers->dthps_actions[which]) == NULL) 14266 return (0); 14267 14268 vstate = &helpers->dthps_vstate; 14269 mstate->dtms_arg[0] = arg0; 14270 mstate->dtms_arg[1] = arg1; 14271 14272 /* 14273 * Now iterate over each helper. If its predicate evaluates to 'true', 14274 * we'll call the corresponding actions. Note that the below calls 14275 * to dtrace_dif_emulate() may set faults in machine state. This is 14276 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14277 * the stored DIF offset with its own (which is the desired behavior). 14278 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14279 * from machine state; this is okay, too. 
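 *
 * The shape of the loop below -- an optional predicate gating a list of
 * actions, with any fault aborting the helper -- reduces to a small
 * pattern.  As a minimal userland sketch (helper_t, run_helpers() and
 * the function pointers are illustrative stand-ins, not the kernel
 * interfaces):
 *
 *	typedef struct helper {
 *		int (*h_pred)(void *);		// optional; nonzero = run
 *		int (*h_actions[4])(void *);	// the actions to evaluate
 *		int h_nactions;
 *		struct helper *h_next;
 *	} helper_t;
 *
 *	// Returns the last action's value, or 0 on a fault.
 *	static int
 *	run_helpers(helper_t *list, void *arg, int *fault)
 *	{
 *		helper_t *h;
 *		int rval = 0, i;
 *
 *		for (h = list; h != NULL; h = h->h_next) {
 *			if (h->h_pred != NULL && !h->h_pred(arg))
 *				continue;	// predicate gated this one
 *
 *			for (i = 0; i < h->h_nactions; i++) {
 *				rval = h->h_actions[i](arg);
 *				if (*fault)
 *					return (0);	// abort on fault
 *			}
 *		}
 *		return (rval);
 *	}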
14280 */ 14281 for (; helper != NULL; helper = helper->dtha_next) { 14282 if ((pred = helper->dtha_predicate) != NULL) { 14283 if (trace) 14284 dtrace_helper_trace(helper, mstate, vstate, 0); 14285 14286 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14287 goto next; 14288 14289 if (*flags & CPU_DTRACE_FAULT) 14290 goto err; 14291 } 14292 14293 for (i = 0; i < helper->dtha_nactions; i++) { 14294 if (trace) 14295 dtrace_helper_trace(helper, 14296 mstate, vstate, i + 1); 14297 14298 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14299 mstate, vstate, state); 14300 14301 if (*flags & CPU_DTRACE_FAULT) 14302 goto err; 14303 } 14304 14305 next: 14306 if (trace) 14307 dtrace_helper_trace(helper, mstate, vstate, 14308 DTRACE_HELPTRACE_NEXT); 14309 } 14310 14311 if (trace) 14312 dtrace_helper_trace(helper, mstate, vstate, 14313 DTRACE_HELPTRACE_DONE); 14314 14315 /* 14316 * Restore the arg0 and arg1 values that we saved upon entry. 14317 */ 14318 mstate->dtms_arg[0] = sarg0; 14319 mstate->dtms_arg[1] = sarg1; 14320 14321 return (rval); 14322 14323 err: 14324 if (trace) 14325 dtrace_helper_trace(helper, mstate, vstate, 14326 DTRACE_HELPTRACE_ERR); 14327 14328 /* 14329 * Restore the arg0 and arg1 values that we saved upon entry. 14330 */ 14331 mstate->dtms_arg[0] = sarg0; 14332 mstate->dtms_arg[1] = sarg1; 14333 14334 return (0); 14335 } 14336 14337 static void 14338 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14339 dtrace_vstate_t *vstate) 14340 { 14341 int i; 14342 14343 if (helper->dtha_predicate != NULL) 14344 dtrace_difo_release(helper->dtha_predicate, vstate); 14345 14346 for (i = 0; i < helper->dtha_nactions; i++) { 14347 ASSERT(helper->dtha_actions[i] != NULL); 14348 dtrace_difo_release(helper->dtha_actions[i], vstate); 14349 } 14350 14351 kmem_free(helper->dtha_actions, 14352 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14353 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14354 } 14355 14356 static int 14357 dtrace_helper_destroygen(int gen) 14358 { 14359 proc_t *p = curproc; 14360 dtrace_helpers_t *help = p->p_dtrace_helpers; 14361 dtrace_vstate_t *vstate; 14362 int i; 14363 14364 ASSERT(MUTEX_HELD(&dtrace_lock)); 14365 14366 if (help == NULL || gen > help->dthps_generation) 14367 return (EINVAL); 14368 14369 vstate = &help->dthps_vstate; 14370 14371 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14372 dtrace_helper_action_t *last = NULL, *h, *next; 14373 14374 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14375 next = h->dtha_next; 14376 14377 if (h->dtha_generation == gen) { 14378 if (last != NULL) { 14379 last->dtha_next = next; 14380 } else { 14381 help->dthps_actions[i] = next; 14382 } 14383 14384 dtrace_helper_action_destroy(h, vstate); 14385 } else { 14386 last = h; 14387 } 14388 } 14389 } 14390 14391 /* 14392 * Iterate until we've cleared out all helper providers with the 14393 * given generation number. 14394 */ 14395 for (;;) { 14396 dtrace_helper_provider_t *prov; 14397 14398 /* 14399 * Look for a helper provider with the right generation. We 14400 * have to start back at the beginning of the list each time 14401 * because we drop dtrace_lock. It's unlikely that we'll make 14402 * more than two passes. 14403 */ 14404 for (i = 0; i < help->dthps_nprovs; i++) { 14405 prov = help->dthps_provs[i]; 14406 14407 if (prov->dthp_generation == gen) 14408 break; 14409 } 14410 14411 /* 14412 * If there were no matches, we're done. 14413 */ 14414 if (i == help->dthps_nprovs) 14415 break; 14416 14417 /* 14418 * Move the last helper provider into this slot.
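 *
 * This is the usual constant-time removal from an unordered array:
 * rather than shifting the tail down, the final element overwrites the
 * vacated slot and the count shrinks.  A minimal sketch of the pattern
 * (names here are illustrative):
 *
 *	static void
 *	array_remove(void **vec, int *n, int i)
 *	{
 *		(*n)--;			// shrink the array
 *		vec[i] = vec[*n];	// move the last element into the hole
 *		vec[*n] = NULL;		// and clear the old last slot
 *	}
 *
 * The cost is that array order is not preserved, which is fine here:
 * the provider table is searched linearly and has no ordering invariant.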
14419 */ 14420 help->dthps_nprovs--; 14421 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14422 help->dthps_provs[help->dthps_nprovs] = NULL; 14423 14424 mutex_exit(&dtrace_lock); 14425 14426 /* 14427 * If we have a meta provider, remove this helper provider. 14428 */ 14429 mutex_enter(&dtrace_meta_lock); 14430 if (dtrace_meta_pid != NULL) { 14431 ASSERT(dtrace_deferred_pid == NULL); 14432 dtrace_helper_provider_remove(&prov->dthp_prov, 14433 p->p_pid); 14434 } 14435 mutex_exit(&dtrace_meta_lock); 14436 14437 dtrace_helper_provider_destroy(prov); 14438 14439 mutex_enter(&dtrace_lock); 14440 } 14441 14442 return (0); 14443 } 14444 14445 static int 14446 dtrace_helper_validate(dtrace_helper_action_t *helper) 14447 { 14448 int err = 0, i; 14449 dtrace_difo_t *dp; 14450 14451 if ((dp = helper->dtha_predicate) != NULL) 14452 err += dtrace_difo_validate_helper(dp); 14453 14454 for (i = 0; i < helper->dtha_nactions; i++) 14455 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14456 14457 return (err == 0); 14458 } 14459 14460 static int 14461 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14462 { 14463 dtrace_helpers_t *help; 14464 dtrace_helper_action_t *helper, *last; 14465 dtrace_actdesc_t *act; 14466 dtrace_vstate_t *vstate; 14467 dtrace_predicate_t *pred; 14468 int count = 0, nactions = 0, i; 14469 14470 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14471 return (EINVAL); 14472 14473 help = curproc->p_dtrace_helpers; 14474 last = help->dthps_actions[which]; 14475 vstate = &help->dthps_vstate; 14476 14477 for (count = 0; last != NULL; last = last->dtha_next) { 14478 count++; 14479 if (last->dtha_next == NULL) 14480 break; 14481 } 14482 14483 /* 14484 * If we already have dtrace_helper_actions_max helper actions for this 14485 * helper action type, we'll refuse to add a new one. 
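 *
 * The count-then-append logic below amounts to a capped tail insert on
 * a singly-linked list.  A self-contained userland sketch, with node_t
 * and the cap as illustrative stand-ins:
 *
 *	#include <errno.h>
 *
 *	typedef struct node { struct node *next; } node_t;
 *
 *	static int
 *	append_capped(node_t **headp, node_t *n, int max)
 *	{
 *		node_t *last = *headp;
 *		int count = 0;
 *
 *		for (; last != NULL && last->next != NULL; last = last->next)
 *			count++;
 *		if (last != NULL)
 *			count++;	// count the final node, too
 *		if (count >= max)
 *			return (ENOSPC);
 *
 *		n->next = NULL;
 *		if (last == NULL)
 *			*headp = n;
 *		else
 *			last->next = n;
 *		return (0);
 *	}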
14486 */ 14487 if (count >= dtrace_helper_actions_max) 14488 return (ENOSPC); 14489 14490 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14491 helper->dtha_generation = help->dthps_generation; 14492 14493 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14494 ASSERT(pred->dtp_difo != NULL); 14495 dtrace_difo_hold(pred->dtp_difo); 14496 helper->dtha_predicate = pred->dtp_difo; 14497 } 14498 14499 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14500 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14501 goto err; 14502 14503 if (act->dtad_difo == NULL) 14504 goto err; 14505 14506 nactions++; 14507 } 14508 14509 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14510 (helper->dtha_nactions = nactions), KM_SLEEP); 14511 14512 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14513 dtrace_difo_hold(act->dtad_difo); 14514 helper->dtha_actions[i++] = act->dtad_difo; 14515 } 14516 14517 if (!dtrace_helper_validate(helper)) 14518 goto err; 14519 14520 if (last == NULL) { 14521 help->dthps_actions[which] = helper; 14522 } else { 14523 last->dtha_next = helper; 14524 } 14525 14526 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14527 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14528 dtrace_helptrace_next = 0; 14529 } 14530 14531 return (0); 14532 err: 14533 dtrace_helper_action_destroy(helper, vstate); 14534 return (EINVAL); 14535 } 14536 14537 static void 14538 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14539 dof_helper_t *dofhp) 14540 { 14541 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14542 14543 mutex_enter(&dtrace_meta_lock); 14544 mutex_enter(&dtrace_lock); 14545 14546 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14547 /* 14548 * If the dtrace module is loaded but not attached, or if 14549 * there isn't a meta provider registered to deal with 14550 * these provider descriptions, we need to postpone creating 14551 * the actual providers until later. 14552 */ 14553 14554 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14555 dtrace_deferred_pid != help) { 14556 help->dthps_deferred = 1; 14557 help->dthps_pid = p->p_pid; 14558 help->dthps_next = dtrace_deferred_pid; 14559 help->dthps_prev = NULL; 14560 if (dtrace_deferred_pid != NULL) 14561 dtrace_deferred_pid->dthps_prev = help; 14562 dtrace_deferred_pid = help; 14563 } 14564 14565 mutex_exit(&dtrace_lock); 14566 14567 } else if (dofhp != NULL) { 14568 /* 14569 * If the dtrace module is loaded and we have a particular 14570 * helper provider description, pass that off to the 14571 * meta provider. 14572 */ 14573 14574 mutex_exit(&dtrace_lock); 14575 14576 dtrace_helper_provide(dofhp, p->p_pid); 14577 14578 } else { 14579 /* 14580 * Otherwise, just pass all the helper provider descriptions 14581 * off to the meta provider. 14582 */ 14583 14584 int i; 14585 mutex_exit(&dtrace_lock); 14586 14587 for (i = 0; i < help->dthps_nprovs; i++) { 14588 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14589 p->p_pid); 14590 } 14591 } 14592 14593 mutex_exit(&dtrace_meta_lock); 14594 } 14595 14596 static int 14597 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14598 { 14599 dtrace_helpers_t *help; 14600 dtrace_helper_provider_t *hprov, **tmp_provs; 14601 uint_t tmp_maxprovs, i; 14602 14603 ASSERT(MUTEX_HELD(&dtrace_lock)); 14604 14605 help = curproc->p_dtrace_helpers; 14606 ASSERT(help != NULL); 14607 14608 /* 14609 * If we already have dtrace_helper_providers_max helper providers, 14610 * we'll refuse to add a new one.
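 *
 * The table growth below is conventional amortized doubling, bounded by
 * the same cap.  As a userland sketch, with calloc()/memcpy()/free()
 * standing in for kmem_zalloc()/bcopy()/kmem_free():
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static void **
 *	grow_table(void **old, size_t *cap, size_t maxsz)
 *	{
 *		size_t ncap = (*cap == 0) ? 2 : *cap * 2;
 *		void **new;
 *
 *		if (ncap > maxsz)
 *			ncap = maxsz;
 *		new = calloc(ncap, sizeof (void *));
 *		if (new == NULL)
 *			return (NULL);	// kmem_zalloc(KM_SLEEP) can't fail
 *		if (old != NULL) {
 *			memcpy(new, old, *cap * sizeof (void *));
 *			free(old);
 *		}
 *		*cap = ncap;
 *		return (new);
 *	}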
14611 */ 14612 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14613 return (ENOSPC); 14614 14615 /* 14616 * Check to make sure this isn't a duplicate. 14617 */ 14618 for (i = 0; i < help->dthps_nprovs; i++) { 14619 if (dofhp->dofhp_addr == 14620 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14621 return (EALREADY); 14622 } 14623 14624 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14625 hprov->dthp_prov = *dofhp; 14626 hprov->dthp_ref = 1; 14627 hprov->dthp_generation = gen; 14628 14629 /* 14630 * Allocate a bigger table for helper providers if it's already full. 14631 */ 14632 if (help->dthps_maxprovs == help->dthps_nprovs) { 14633 tmp_maxprovs = help->dthps_maxprovs; 14634 tmp_provs = help->dthps_provs; 14635 14636 if (help->dthps_maxprovs == 0) 14637 help->dthps_maxprovs = 2; 14638 else 14639 help->dthps_maxprovs *= 2; 14640 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14641 help->dthps_maxprovs = dtrace_helper_providers_max; 14642 14643 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14644 14645 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14646 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14647 14648 if (tmp_provs != NULL) { 14649 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14650 sizeof (dtrace_helper_provider_t *)); 14651 kmem_free(tmp_provs, tmp_maxprovs * 14652 sizeof (dtrace_helper_provider_t *)); 14653 } 14654 } 14655 14656 help->dthps_provs[help->dthps_nprovs] = hprov; 14657 help->dthps_nprovs++; 14658 14659 return (0); 14660 } 14661 14662 static void 14663 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14664 { 14665 mutex_enter(&dtrace_lock); 14666 14667 if (--hprov->dthp_ref == 0) { 14668 dof_hdr_t *dof; 14669 mutex_exit(&dtrace_lock); 14670 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14671 dtrace_dof_destroy(dof); 14672 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14673 } else { 14674 mutex_exit(&dtrace_lock); 14675 } 14676 } 14677 14678 static int 14679 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14680 { 14681 uintptr_t daddr = (uintptr_t)dof; 14682 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14683 dof_provider_t *provider; 14684 dof_probe_t *probe; 14685 uint8_t *arg; 14686 char *strtab, *typestr; 14687 dof_stridx_t typeidx; 14688 size_t typesz; 14689 uint_t nprobes, j, k; 14690 14691 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14692 14693 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14694 dtrace_dof_error(dof, "misaligned section offset"); 14695 return (-1); 14696 } 14697 14698 /* 14699 * The section needs to be large enough to contain the DOF provider 14700 * structure appropriate for the given version. 14701 */ 14702 if (sec->dofs_size < 14703 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14704 offsetof(dof_provider_t, dofpv_prenoffs) : 14705 sizeof (dof_provider_t))) { 14706 dtrace_dof_error(dof, "provider section too small"); 14707 return (-1); 14708 } 14709 14710 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14711 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14712 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14713 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14714 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14715 14716 if (str_sec == NULL || prb_sec == NULL || 14717 arg_sec == NULL || off_sec == NULL) 14718 return (-1); 14719 14720 enoff_sec = NULL; 14721 14722 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14723 provider->dofpv_prenoffs != DOF_SECT_NONE && 14724 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14725 provider->dofpv_prenoffs)) == NULL) 14726 return (-1); 14727 14728 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14729 14730 if (provider->dofpv_name >= str_sec->dofs_size || 14731 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14732 dtrace_dof_error(dof, "invalid provider name"); 14733 return (-1); 14734 } 14735 14736 if (prb_sec->dofs_entsize == 0 || 14737 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14738 dtrace_dof_error(dof, "invalid entry size"); 14739 return (-1); 14740 } 14741 14742 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14743 dtrace_dof_error(dof, "misaligned entry size"); 14744 return (-1); 14745 } 14746 14747 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14748 dtrace_dof_error(dof, "invalid entry size"); 14749 return (-1); 14750 } 14751 14752 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14753 dtrace_dof_error(dof, "misaligned section offset"); 14754 return (-1); 14755 } 14756 14757 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14758 dtrace_dof_error(dof, "invalid entry size"); 14759 return (-1); 14760 } 14761 14762 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14763 14764 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14765 14766 /* 14767 * Take a pass through the probes to check for errors. 14768 */ 14769 for (j = 0; j < nprobes; j++) { 14770 probe = (dof_probe_t *)(uintptr_t)(daddr + 14771 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14772 14773 if (probe->dofpr_func >= str_sec->dofs_size) { 14774 dtrace_dof_error(dof, "invalid function name"); 14775 return (-1); 14776 } 14777 14778 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14779 dtrace_dof_error(dof, "function name too long"); 14780 return (-1); 14781 } 14782 14783 if (probe->dofpr_name >= str_sec->dofs_size || 14784 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14785 dtrace_dof_error(dof, "invalid probe name"); 14786 return (-1); 14787 } 14788 14789 /* 14790 * The offset count must not wrap the index, and the offsets 14791 * must also not overflow the section's data. 14792 */ 14793 if (probe->dofpr_offidx + probe->dofpr_noffs < 14794 probe->dofpr_offidx || 14795 (probe->dofpr_offidx + probe->dofpr_noffs) * 14796 off_sec->dofs_entsize > off_sec->dofs_size) { 14797 dtrace_dof_error(dof, "invalid probe offset"); 14798 return (-1); 14799 } 14800 14801 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14802 /* 14803 * If there's no is-enabled offset section, make sure 14804 * there aren't any is-enabled offsets. Otherwise 14805 * perform the same checks as for probe offsets 14806 * (immediately above). 
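 *
 * Both checks follow the same overflow-safe recipe: detect index
 * wraparound first, then bound the scaled range against the section
 * size.  A sketch of the recipe in isolation (the 64-bit widening of
 * the product is this sketch's own belt-and-suspenders, guarding the
 * multiplication as well):
 *
 *	#include <stdint.h>
 *
 *	static int
 *	range_ok(uint32_t idx, uint32_t n, uint32_t entsize, uint64_t size)
 *	{
 *		if (idx + n < idx)	// 32-bit addition wrapped
 *			return (0);
 *		if ((uint64_t)(idx + n) * entsize > size)
 *			return (0);	// runs past the end of the section
 *		return (1);
 *	}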
14807 */ 14808 if (enoff_sec == NULL) { 14809 if (probe->dofpr_enoffidx != 0 || 14810 probe->dofpr_nenoffs != 0) { 14811 dtrace_dof_error(dof, "is-enabled " 14812 "offsets with null section"); 14813 return (-1); 14814 } 14815 } else if (probe->dofpr_enoffidx + 14816 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14817 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14818 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14819 dtrace_dof_error(dof, "invalid is-enabled " 14820 "offset"); 14821 return (-1); 14822 } 14823 14824 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14825 dtrace_dof_error(dof, "zero probe and " 14826 "is-enabled offsets"); 14827 return (-1); 14828 } 14829 } else if (probe->dofpr_noffs == 0) { 14830 dtrace_dof_error(dof, "zero probe offsets"); 14831 return (-1); 14832 } 14833 14834 if (probe->dofpr_argidx + probe->dofpr_xargc < 14835 probe->dofpr_argidx || 14836 (probe->dofpr_argidx + probe->dofpr_xargc) * 14837 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14838 dtrace_dof_error(dof, "invalid args"); 14839 return (-1); 14840 } 14841 14842 typeidx = probe->dofpr_nargv; 14843 typestr = strtab + probe->dofpr_nargv; 14844 for (k = 0; k < probe->dofpr_nargc; k++) { 14845 if (typeidx >= str_sec->dofs_size) { 14846 dtrace_dof_error(dof, "bad " 14847 "native argument type"); 14848 return (-1); 14849 } 14850 14851 typesz = strlen(typestr) + 1; 14852 if (typesz > DTRACE_ARGTYPELEN) { 14853 dtrace_dof_error(dof, "native " 14854 "argument type too long"); 14855 return (-1); 14856 } 14857 typeidx += typesz; 14858 typestr += typesz; 14859 } 14860 14861 typeidx = probe->dofpr_xargv; 14862 typestr = strtab + probe->dofpr_xargv; 14863 for (k = 0; k < probe->dofpr_xargc; k++) { 14864 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14865 dtrace_dof_error(dof, "bad " 14866 "native argument index"); 14867 return (-1); 14868 } 14869 14870 if (typeidx >= str_sec->dofs_size) { 14871 dtrace_dof_error(dof, "bad " 14872 "translated argument type"); 14873 return (-1); 14874 } 14875 14876 typesz = strlen(typestr) + 1; 14877 if (typesz > DTRACE_ARGTYPELEN) { 14878 dtrace_dof_error(dof, "translated argument " 14879 "type too long"); 14880 return (-1); 14881 } 14882 14883 typeidx += typesz; 14884 typestr += typesz; 14885 } 14886 } 14887 14888 return (0); 14889 } 14890 14891 static int 14892 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14893 { 14894 dtrace_helpers_t *help; 14895 dtrace_vstate_t *vstate; 14896 dtrace_enabling_t *enab = NULL; 14897 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14898 uintptr_t daddr = (uintptr_t)dof; 14899 14900 ASSERT(MUTEX_HELD(&dtrace_lock)); 14901 14902 if ((help = curproc->p_dtrace_helpers) == NULL) 14903 help = dtrace_helpers_create(curproc); 14904 14905 vstate = &help->dthps_vstate; 14906 14907 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14908 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14909 dtrace_dof_destroy(dof); 14910 return (rv); 14911 } 14912 14913 /* 14914 * Look for helper providers and validate their descriptions. 
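 *
 * The walk below strides by dofh_secsize taken from the DOF header
 * rather than by sizeof (dof_sec_t), so a consumer can step over
 * section-header fields it doesn't know about.  The traversal itself,
 * as a small sketch (sec_t and visit() are illustrative):
 *
 *	#include <stdint.h>
 *
 *	typedef struct sec { uint32_t s_type; } sec_t;
 *
 *	static void
 *	walk_sections(const unsigned char *base, uint64_t secoff,
 *	    uint32_t secnum, uint32_t secsize, void (*visit)(const sec_t *))
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < secnum; i++)
 *			visit((const sec_t *)(base + secoff +
 *			    (uint64_t)i * secsize));
 *	}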
14915 */ 14916 if (dhp != NULL) { 14917 for (i = 0; i < dof->dofh_secnum; i++) { 14918 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14919 dof->dofh_secoff + i * dof->dofh_secsize); 14920 14921 if (sec->dofs_type != DOF_SECT_PROVIDER) 14922 continue; 14923 14924 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14925 dtrace_enabling_destroy(enab); 14926 dtrace_dof_destroy(dof); 14927 return (-1); 14928 } 14929 14930 nprovs++; 14931 } 14932 } 14933 14934 /* 14935 * Now we need to walk through the ECB descriptions in the enabling. 14936 */ 14937 for (i = 0; i < enab->dten_ndesc; i++) { 14938 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14939 dtrace_probedesc_t *desc = &ep->dted_probe; 14940 14941 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14942 continue; 14943 14944 if (strcmp(desc->dtpd_mod, "helper") != 0) 14945 continue; 14946 14947 if (strcmp(desc->dtpd_func, "ustack") != 0) 14948 continue; 14949 14950 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14951 ep)) != 0) { 14952 /* 14953 * Adding this helper action failed -- we are now going 14954 * to rip out the entire generation and return failure. 14955 */ 14956 (void) dtrace_helper_destroygen(help->dthps_generation); 14957 dtrace_enabling_destroy(enab); 14958 dtrace_dof_destroy(dof); 14959 return (-1); 14960 } 14961 14962 nhelpers++; 14963 } 14964 14965 if (nhelpers < enab->dten_ndesc) 14966 dtrace_dof_error(dof, "unmatched helpers"); 14967 14968 gen = help->dthps_generation++; 14969 dtrace_enabling_destroy(enab); 14970 14971 if (dhp != NULL && nprovs > 0) { 14972 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14973 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14974 mutex_exit(&dtrace_lock); 14975 dtrace_helper_provider_register(curproc, help, dhp); 14976 mutex_enter(&dtrace_lock); 14977 14978 destroy = 0; 14979 } 14980 } 14981 14982 if (destroy) 14983 dtrace_dof_destroy(dof); 14984 14985 return (gen); 14986 } 14987 14988 static dtrace_helpers_t * 14989 dtrace_helpers_create(proc_t *p) 14990 { 14991 dtrace_helpers_t *help; 14992 14993 ASSERT(MUTEX_HELD(&dtrace_lock)); 14994 ASSERT(p->p_dtrace_helpers == NULL); 14995 14996 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14997 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14998 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14999 15000 p->p_dtrace_helpers = help; 15001 dtrace_helpers++; 15002 15003 return (help); 15004 } 15005 15006 #if defined(sun) 15007 static void 15008 dtrace_helpers_destroy(void) 15009 { 15010 proc_t *p = curproc; 15011 #else 15012 void 15013 dtrace_helpers_destroy(proc_t *p) 15014 { 15015 #endif 15016 dtrace_helpers_t *help; 15017 dtrace_vstate_t *vstate; 15018 int i; 15019 mutex_enter(&dtrace_lock); 15020 15021 ASSERT(p->p_dtrace_helpers != NULL); 15022 ASSERT(dtrace_helpers > 0); 15023 15024 help = p->p_dtrace_helpers; 15025 vstate = &help->dthps_vstate; 15026 15027 /* 15028 * We're now going to lose the help from this process. 15029 */ 15030 p->p_dtrace_helpers = NULL; 15031 dtrace_sync(); 15032 15033 /* 15034 * Destroy the helper actions. 15035 */ 15036 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15037 dtrace_helper_action_t *h, *next; 15038 15039 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15040 next = h->dtha_next; 15041 dtrace_helper_action_destroy(h, vstate); 15042 h = next; 15043 } 15044 } 15045 15046 mutex_exit(&dtrace_lock); 15047 15048 /* 15049 * Destroy the helper providers.
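 *
 * dtrace_helper_provider_destroy() (called below, defined earlier) is a
 * conventional reference-count release: the count is decremented under
 * the lock, but the actual free happens after the lock is dropped.  A
 * userland sketch of that discipline, using pthreads in place of the
 * kernel mutexes:
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	typedef struct obj { int o_ref; } obj_t;
 *
 *	static void
 *	obj_rele(obj_t *o, pthread_mutex_t *lk)
 *	{
 *		int last;
 *
 *		pthread_mutex_lock(lk);
 *		last = (--o->o_ref == 0);
 *		pthread_mutex_unlock(lk);
 *
 *		if (last)
 *			free(o);	// no other holder can exist now
 *	}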
15050 */ 15051 if (help->dthps_maxprovs > 0) { 15052 mutex_enter(&dtrace_meta_lock); 15053 if (dtrace_meta_pid != NULL) { 15054 ASSERT(dtrace_deferred_pid == NULL); 15055 15056 for (i = 0; i < help->dthps_nprovs; i++) { 15057 dtrace_helper_provider_remove( 15058 &help->dthps_provs[i]->dthp_prov, p->p_pid); 15059 } 15060 } else { 15061 mutex_enter(&dtrace_lock); 15062 ASSERT(help->dthps_deferred == 0 || 15063 help->dthps_next != NULL || 15064 help->dthps_prev != NULL || 15065 help == dtrace_deferred_pid); 15066 15067 /* 15068 * Remove the helper from the deferred list. 15069 */ 15070 if (help->dthps_next != NULL) 15071 help->dthps_next->dthps_prev = help->dthps_prev; 15072 if (help->dthps_prev != NULL) 15073 help->dthps_prev->dthps_next = help->dthps_next; 15074 if (dtrace_deferred_pid == help) { 15075 dtrace_deferred_pid = help->dthps_next; 15076 ASSERT(help->dthps_prev == NULL); 15077 } 15078 15079 mutex_exit(&dtrace_lock); 15080 } 15081 15082 mutex_exit(&dtrace_meta_lock); 15083 15084 for (i = 0; i < help->dthps_nprovs; i++) { 15085 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15086 } 15087 15088 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15089 sizeof (dtrace_helper_provider_t *)); 15090 } 15091 15092 mutex_enter(&dtrace_lock); 15093 15094 dtrace_vstate_fini(&help->dthps_vstate); 15095 kmem_free(help->dthps_actions, 15096 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15097 kmem_free(help, sizeof (dtrace_helpers_t)); 15098 15099 --dtrace_helpers; 15100 mutex_exit(&dtrace_lock); 15101 } 15102 15103 #if defined(sun) 15104 static 15105 #endif 15106 void 15107 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15108 { 15109 dtrace_helpers_t *help, *newhelp; 15110 dtrace_helper_action_t *helper, *new, *last; 15111 dtrace_difo_t *dp; 15112 dtrace_vstate_t *vstate; 15113 int i, j, sz, hasprovs = 0; 15114 15115 mutex_enter(&dtrace_lock); 15116 ASSERT(from->p_dtrace_helpers != NULL); 15117 ASSERT(dtrace_helpers > 0); 15118 15119 help = from->p_dtrace_helpers; 15120 newhelp = dtrace_helpers_create(to); 15121 ASSERT(to->p_dtrace_helpers != NULL); 15122 15123 newhelp->dthps_generation = help->dthps_generation; 15124 vstate = &newhelp->dthps_vstate; 15125 15126 /* 15127 * Duplicate the helper actions. 15128 */ 15129 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15130 if ((helper = help->dthps_actions[i]) == NULL) 15131 continue; 15132 15133 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15134 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15135 KM_SLEEP); 15136 new->dtha_generation = helper->dtha_generation; 15137 15138 if ((dp = helper->dtha_predicate) != NULL) { 15139 dp = dtrace_difo_duplicate(dp, vstate); 15140 new->dtha_predicate = dp; 15141 } 15142 15143 new->dtha_nactions = helper->dtha_nactions; 15144 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15145 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15146 15147 for (j = 0; j < new->dtha_nactions; j++) { 15148 dtrace_difo_t *dp = helper->dtha_actions[j]; 15149 15150 ASSERT(dp != NULL); 15151 dp = dtrace_difo_duplicate(dp, vstate); 15152 new->dtha_actions[j] = dp; 15153 } 15154 15155 if (last != NULL) { 15156 last->dtha_next = new; 15157 } else { 15158 newhelp->dthps_actions[i] = new; 15159 } 15160 15161 last = new; 15162 } 15163 } 15164 15165 /* 15166 * Duplicate the helper providers and register them with the 15167 * DTrace framework. 
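 *
 * Note the asymmetry in this duplication: helper actions are deep-copied
 * (each DIFO is duplicated into the child's vstate), while provider
 * descriptions are shared by taking another reference.  The sharing half
 * looks like this in miniature (illustrative names; the lock protects
 * the counts, as dtrace_lock does here):
 *
 *	#include <pthread.h>
 *
 *	typedef struct shared { int s_ref; } shared_t;
 *
 *	static void
 *	share_on_fork(shared_t **child, shared_t **parent, int n,
 *	    pthread_mutex_t *lk)
 *	{
 *		int i;
 *
 *		pthread_mutex_lock(lk);
 *		for (i = 0; i < n; i++) {
 *			child[i] = parent[i];	// same object...
 *			child[i]->s_ref++;	// ...one more holder
 *		}
 *		pthread_mutex_unlock(lk);
 *	}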
15168 */ 15169 if (help->dthps_nprovs > 0) { 15170 newhelp->dthps_nprovs = help->dthps_nprovs; 15171 newhelp->dthps_maxprovs = help->dthps_nprovs; 15172 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 15173 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15174 for (i = 0; i < newhelp->dthps_nprovs; i++) { 15175 newhelp->dthps_provs[i] = help->dthps_provs[i]; 15176 newhelp->dthps_provs[i]->dthp_ref++; 15177 } 15178 15179 hasprovs = 1; 15180 } 15181 15182 mutex_exit(&dtrace_lock); 15183 15184 if (hasprovs) 15185 dtrace_helper_provider_register(to, newhelp, NULL); 15186 } 15187 15188 /* 15189 * DTrace Hook Functions 15190 */ 15191 static void 15192 dtrace_module_loaded(modctl_t *ctl) 15193 { 15194 dtrace_provider_t *prv; 15195 15196 mutex_enter(&dtrace_provider_lock); 15197 #if defined(sun) 15198 mutex_enter(&mod_lock); 15199 #endif 15200 15201 #if defined(sun) 15202 ASSERT(ctl->mod_busy); 15203 #endif 15204 15205 /* 15206 * We're going to call each provider's per-module provide operation 15207 * specifying only this module. 15208 */ 15209 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 15210 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 15211 15212 #if defined(sun) 15213 mutex_exit(&mod_lock); 15214 #endif 15215 mutex_exit(&dtrace_provider_lock); 15216 15217 /* 15218 * If we have any retained enablings, we need to match against them. 15219 * Enabling probes requires that cpu_lock be held, and we cannot hold 15220 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 15221 * module. (In particular, this happens when loading scheduling 15222 * classes.) So if we have any retained enablings, we need to dispatch 15223 * our task queue to do the match for us. 15224 */ 15225 mutex_enter(&dtrace_lock); 15226 15227 if (dtrace_retained == NULL) { 15228 mutex_exit(&dtrace_lock); 15229 return; 15230 } 15231 15232 (void) taskq_dispatch(dtrace_taskq, 15233 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15234 15235 mutex_exit(&dtrace_lock); 15236 15237 /* 15238 * And now, for a little heuristic sleaze: in general, we want to 15239 * match modules as soon as they load. However, we cannot guarantee 15240 * this, because it would lead us to the lock ordering violation 15241 * outlined above. The common case, of course, is that cpu_lock is 15242 * _not_ held -- so we delay here for a clock tick, hoping that that's 15243 * long enough for the task queue to do its work. If it's not, it's 15244 * not a serious problem -- it just means that the module that we 15245 * just loaded may not be immediately instrumentable. 15246 */ 15247 delay(1); 15248 } 15249 15250 static void 15251 #if defined(sun) 15252 dtrace_module_unloaded(modctl_t *ctl) 15253 #else 15254 dtrace_module_unloaded(modctl_t *ctl, int *error) 15255 #endif 15256 { 15257 dtrace_probe_t template, *probe, *first, *next; 15258 dtrace_provider_t *prov; 15259 #if !defined(sun) 15260 char modname[DTRACE_MODNAMELEN]; 15261 size_t len; 15262 #endif 15263 15264 #if defined(sun) 15265 template.dtpr_mod = ctl->mod_modname; 15266 #else 15267 /* Handle the fact that ctl->filename may end in ".ko".
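 *
 * In isolation, with the same libc routines (one nuance: this sketch
 * measures the copied -- possibly truncated -- buffer rather than the
 * source string):
 *
 *	#include <string.h>
 *
 *	static void
 *	module_basename(char *buf, size_t len, const char *file)
 *	{
 *		size_t n;
 *
 *		strlcpy(buf, file, len);
 *		n = strlen(buf);
 *		if (n > 3 && strcmp(buf + n - 3, ".ko") == 0)
 *			buf[n - 3] = '\0';	// chop the suffix
 *	}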
*/ 15268 strlcpy(modname, ctl->filename, sizeof(modname)); 15269 len = strlen(ctl->filename); 15270 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 15271 modname[len - 3] = '\0'; 15272 template.dtpr_mod = modname; 15273 #endif 15274 15275 mutex_enter(&dtrace_provider_lock); 15276 #if defined(sun) 15277 mutex_enter(&mod_lock); 15278 #endif 15279 mutex_enter(&dtrace_lock); 15280 15281 #if !defined(sun) 15282 if (ctl->nenabled > 0) { 15283 /* Don't allow unloads if a probe is enabled. */ 15284 mutex_exit(&dtrace_provider_lock); 15285 mutex_exit(&dtrace_lock); 15286 *error = -1; 15287 printf( 15288 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 15289 return; 15290 } 15291 #endif 15292 15293 if (dtrace_bymod == NULL) { 15294 /* 15295 * The DTrace module is loaded (obviously) but not attached; 15296 * we don't have any work to do. 15297 */ 15298 mutex_exit(&dtrace_provider_lock); 15299 #if defined(sun) 15300 mutex_exit(&mod_lock); 15301 #endif 15302 mutex_exit(&dtrace_lock); 15303 return; 15304 } 15305 15306 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15307 probe != NULL; probe = probe->dtpr_nextmod) { 15308 if (probe->dtpr_ecb != NULL) { 15309 mutex_exit(&dtrace_provider_lock); 15310 #if defined(sun) 15311 mutex_exit(&mod_lock); 15312 #endif 15313 mutex_exit(&dtrace_lock); 15314 15315 /* 15316 * This shouldn't _actually_ be possible -- we're 15317 * unloading a module that has an enabled probe in it. 15318 * (It's normally up to the provider to make sure that 15319 * this can't happen.) However, because dtps_enable() 15320 * doesn't have a failure mode, there can be an 15321 * enable/unload race. Upshot: we don't want to 15322 * assert, but we're not going to disable the 15323 * probe, either. 15324 */ 15325 if (dtrace_err_verbose) { 15326 #if defined(sun) 15327 cmn_err(CE_WARN, "unloaded module '%s' had " 15328 "enabled probes", ctl->mod_modname); 15329 #else 15330 cmn_err(CE_WARN, "unloaded module '%s' had " 15331 "enabled probes", modname); 15332 #endif 15333 } 15334 15335 return; 15336 } 15337 } 15338 15339 probe = first; 15340 15341 for (first = NULL; probe != NULL; probe = next) { 15342 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15343 15344 dtrace_probes[probe->dtpr_id - 1] = NULL; 15345 15346 next = probe->dtpr_nextmod; 15347 dtrace_hash_remove(dtrace_bymod, probe); 15348 dtrace_hash_remove(dtrace_byfunc, probe); 15349 dtrace_hash_remove(dtrace_byname, probe); 15350 15351 if (first == NULL) { 15352 first = probe; 15353 probe->dtpr_nextmod = NULL; 15354 } else { 15355 probe->dtpr_nextmod = first; 15356 first = probe; 15357 } 15358 } 15359 15360 /* 15361 * We've removed all of the module's probes from the hash chains and 15362 * from the probe array. Now issue a dtrace_sync() to be sure that 15363 * everyone has cleared out from any probe array processing. 
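 *
 * This is the classic unlink/synchronize/free discipline: once nothing
 * can find the probes and every CPU has been observed outside of probe
 * context, the memory can be reclaimed without further locking.  In
 * miniature (sync_readers() is a hypothetical stand-in for
 * dtrace_sync()):
 *
 *	#include <stdlib.h>
 *
 *	typedef struct node { struct node *next; } node_t;
 *
 *	extern void sync_readers(void);	// returns once all readers drain
 *
 *	static void
 *	reclaim(node_t *unlinked)	// already removed from all lookups
 *	{
 *		node_t *n, *next;
 *
 *		sync_readers();		// existing readers have finished
 *
 *		for (n = unlinked; n != NULL; n = next) {
 *			next = n->next;
 *			free(n);	// safe: no one can reach 'n' now
 *		}
 *	}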
15364 */ 15365 dtrace_sync(); 15366 15367 for (probe = first; probe != NULL; probe = first) { 15368 first = probe->dtpr_nextmod; 15369 prov = probe->dtpr_provider; 15370 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15371 probe->dtpr_arg); 15372 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15373 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15374 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15375 #if defined(sun) 15376 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15377 #else 15378 free_unr(dtrace_arena, probe->dtpr_id); 15379 #endif 15380 kmem_free(probe, sizeof (dtrace_probe_t)); 15381 } 15382 15383 mutex_exit(&dtrace_lock); 15384 #if defined(sun) 15385 mutex_exit(&mod_lock); 15386 #endif 15387 mutex_exit(&dtrace_provider_lock); 15388 } 15389 15390 #if !defined(sun) 15391 static void 15392 dtrace_kld_load(void *arg __unused, linker_file_t lf) 15393 { 15394 15395 dtrace_module_loaded(lf); 15396 } 15397 15398 static void 15399 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 15400 { 15401 15402 if (*error != 0) 15403 /* We already have an error, so don't do anything. */ 15404 return; 15405 dtrace_module_unloaded(lf, error); 15406 } 15407 #endif 15408 15409 #if defined(sun) 15410 static void 15411 dtrace_suspend(void) 15412 { 15413 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15414 } 15415 15416 static void 15417 dtrace_resume(void) 15418 { 15419 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15420 } 15421 #endif 15422 15423 static int 15424 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15425 { 15426 ASSERT(MUTEX_HELD(&cpu_lock)); 15427 mutex_enter(&dtrace_lock); 15428 15429 switch (what) { 15430 case CPU_CONFIG: { 15431 dtrace_state_t *state; 15432 dtrace_optval_t *opt, rs, c; 15433 15434 /* 15435 * For now, we only allocate a new buffer for anonymous state. 15436 */ 15437 if ((state = dtrace_anon.dta_state) == NULL) 15438 break; 15439 15440 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15441 break; 15442 15443 opt = state->dts_options; 15444 c = opt[DTRACEOPT_CPU]; 15445 15446 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15447 break; 15448 15449 /* 15450 * Regardless of what the actual policy is, we're going to 15451 * temporarily set our resize policy to be manual. We're 15452 * also going to temporarily set our CPU option to denote 15453 * the newly configured CPU. 15454 */ 15455 rs = opt[DTRACEOPT_BUFRESIZE]; 15456 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15457 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15458 15459 (void) dtrace_state_buffers(state); 15460 15461 opt[DTRACEOPT_BUFRESIZE] = rs; 15462 opt[DTRACEOPT_CPU] = c; 15463 15464 break; 15465 } 15466 15467 case CPU_UNCONFIG: 15468 /* 15469 * We don't free the buffer in the CPU_UNCONFIG case. (The 15470 * buffer will be freed when the consumer exits.) 
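 *
 * The CPU_CONFIG case above uses a save/override/restore idiom: force
 * the resize policy to manual and pin the CPU option while allocating
 * the new buffer, then put both options back.  Schematically (the
 * option names and do_resize() are stand-ins for the DTRACEOPT_*
 * indices and dtrace_state_buffers(), not the real interfaces):
 *
 *	enum { OPT_RESIZE, OPT_CPU };
 *	#define	RESIZE_MANUAL	1
 *	extern void do_resize(long *opts);
 *
 *	static void
 *	resize_one_cpu(long *opts, int cpu)
 *	{
 *		long rs = opts[OPT_RESIZE], c = opts[OPT_CPU];
 *
 *		opts[OPT_RESIZE] = RESIZE_MANUAL;	// don't auto-resize
 *		opts[OPT_CPU] = cpu;			// only the new CPU
 *		do_resize(opts);
 *		opts[OPT_RESIZE] = rs;			// restore both
 *		opts[OPT_CPU] = c;
 *	}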
15471 */ 15472 break; 15473 15474 default: 15475 break; 15476 } 15477 15478 mutex_exit(&dtrace_lock); 15479 return (0); 15480 } 15481 15482 #if defined(sun) 15483 static void 15484 dtrace_cpu_setup_initial(processorid_t cpu) 15485 { 15486 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15487 } 15488 #endif 15489 15490 static void 15491 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15492 { 15493 if (dtrace_toxranges >= dtrace_toxranges_max) { 15494 int osize, nsize; 15495 dtrace_toxrange_t *range; 15496 15497 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15498 15499 if (osize == 0) { 15500 ASSERT(dtrace_toxrange == NULL); 15501 ASSERT(dtrace_toxranges_max == 0); 15502 dtrace_toxranges_max = 1; 15503 } else { 15504 dtrace_toxranges_max <<= 1; 15505 } 15506 15507 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15508 range = kmem_zalloc(nsize, KM_SLEEP); 15509 15510 if (dtrace_toxrange != NULL) { 15511 ASSERT(osize != 0); 15512 bcopy(dtrace_toxrange, range, osize); 15513 kmem_free(dtrace_toxrange, osize); 15514 } 15515 15516 dtrace_toxrange = range; 15517 } 15518 15519 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15520 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15521 15522 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15523 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15524 dtrace_toxranges++; 15525 } 15526 15527 /* 15528 * DTrace Driver Cookbook Functions 15529 */ 15530 #if defined(sun) 15531 /*ARGSUSED*/ 15532 static int 15533 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15534 { 15535 dtrace_provider_id_t id; 15536 dtrace_state_t *state = NULL; 15537 dtrace_enabling_t *enab; 15538 15539 mutex_enter(&cpu_lock); 15540 mutex_enter(&dtrace_provider_lock); 15541 mutex_enter(&dtrace_lock); 15542 15543 if (ddi_soft_state_init(&dtrace_softstate, 15544 sizeof (dtrace_state_t), 0) != 0) { 15545 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15546 mutex_exit(&cpu_lock); 15547 mutex_exit(&dtrace_provider_lock); 15548 mutex_exit(&dtrace_lock); 15549 return (DDI_FAILURE); 15550 } 15551 15552 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15553 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15554 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15555 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15556 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15557 ddi_remove_minor_node(devi, NULL); 15558 ddi_soft_state_fini(&dtrace_softstate); 15559 mutex_exit(&cpu_lock); 15560 mutex_exit(&dtrace_provider_lock); 15561 mutex_exit(&dtrace_lock); 15562 return (DDI_FAILURE); 15563 } 15564 15565 ddi_report_dev(devi); 15566 dtrace_devi = devi; 15567 15568 dtrace_modload = dtrace_module_loaded; 15569 dtrace_modunload = dtrace_module_unloaded; 15570 dtrace_cpu_init = dtrace_cpu_setup_initial; 15571 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15572 dtrace_helpers_fork = dtrace_helpers_duplicate; 15573 dtrace_cpustart_init = dtrace_suspend; 15574 dtrace_cpustart_fini = dtrace_resume; 15575 dtrace_debugger_init = dtrace_suspend; 15576 dtrace_debugger_fini = dtrace_resume; 15577 15578 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15579 15580 ASSERT(MUTEX_HELD(&cpu_lock)); 15581 15582 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15583 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15584 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15585 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15586 VM_SLEEP | 
VMC_IDENTIFIER); 15587 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15588 1, INT_MAX, 0); 15589 15590 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15591 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15592 NULL, NULL, NULL, NULL, NULL, 0); 15593 15594 ASSERT(MUTEX_HELD(&cpu_lock)); 15595 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15596 offsetof(dtrace_probe_t, dtpr_nextmod), 15597 offsetof(dtrace_probe_t, dtpr_prevmod)); 15598 15599 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15600 offsetof(dtrace_probe_t, dtpr_nextfunc), 15601 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15602 15603 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15604 offsetof(dtrace_probe_t, dtpr_nextname), 15605 offsetof(dtrace_probe_t, dtpr_prevname)); 15606 15607 if (dtrace_retain_max < 1) { 15608 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15609 "setting to 1", dtrace_retain_max); 15610 dtrace_retain_max = 1; 15611 } 15612 15613 /* 15614 * Now discover our toxic ranges. 15615 */ 15616 dtrace_toxic_ranges(dtrace_toxrange_add); 15617 15618 /* 15619 * Before we register ourselves as a provider to our own framework, 15620 * we would like to assert that dtrace_provider is NULL -- but that's 15621 * not true if we were loaded as a dependency of a DTrace provider. 15622 * Once we've registered, we can assert that dtrace_provider is our 15623 * pseudo provider. 15624 */ 15625 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15626 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15627 15628 ASSERT(dtrace_provider != NULL); 15629 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15630 15631 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15632 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15633 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15634 dtrace_provider, NULL, NULL, "END", 0, NULL); 15635 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15636 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15637 15638 dtrace_anon_property(); 15639 mutex_exit(&cpu_lock); 15640 15641 /* 15642 * If DTrace helper tracing is enabled, we need to allocate the 15643 * trace buffer and initialize the values. 15644 */ 15645 if (dtrace_helptrace_enabled) { 15646 ASSERT(dtrace_helptrace_buffer == NULL); 15647 dtrace_helptrace_buffer = 15648 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15649 dtrace_helptrace_next = 0; 15650 } 15651 15652 /* 15653 * If there are already providers, we must ask them to provide their 15654 * probes, and then match any anonymous enabling against them. Note 15655 * that there should be no other retained enablings at this time: 15656 * the only retained enablings at this time should be the anonymous 15657 * enabling. 15658 */ 15659 if (dtrace_anon.dta_enabling != NULL) { 15660 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15661 15662 dtrace_enabling_provide(NULL); 15663 state = dtrace_anon.dta_state; 15664 15665 /* 15666 * We couldn't hold cpu_lock across the above call to 15667 * dtrace_enabling_provide(), but we must hold it to actually 15668 * enable the probes. We have to drop all of our locks, pick 15669 * up cpu_lock, and regain our locks before matching the 15670 * retained anonymous enabling. 
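 *
 * The rule being observed is a fixed lock order (cpu_lock before
 * dtrace_provider_lock before dtrace_lock): a lock that sorts earlier
 * than locks already held must never be acquired directly.  The dance,
 * in miniature (pthreads in place of the kernel mutexes; any state read
 * under the dropped locks may have changed and must be revalidated
 * afterward, which is why dta_enabling is rechecked below):
 *
 *	#include <pthread.h>
 *
 *	// Lock order: a before b before c; b and c are held on entry.
 *	static void
 *	acquire_a_with_b_c_held(pthread_mutex_t *a, pthread_mutex_t *b,
 *	    pthread_mutex_t *c)
 *	{
 *		pthread_mutex_unlock(c);	// drop in reverse order
 *		pthread_mutex_unlock(b);
 *
 *		pthread_mutex_lock(a);		// reacquire in order
 *		pthread_mutex_lock(b);
 *		pthread_mutex_lock(c);
 *	}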
15671 */ 15672 mutex_exit(&dtrace_lock); 15673 mutex_exit(&dtrace_provider_lock); 15674 15675 mutex_enter(&cpu_lock); 15676 mutex_enter(&dtrace_provider_lock); 15677 mutex_enter(&dtrace_lock); 15678 15679 if ((enab = dtrace_anon.dta_enabling) != NULL) 15680 (void) dtrace_enabling_match(enab, NULL); 15681 15682 mutex_exit(&cpu_lock); 15683 } 15684 15685 mutex_exit(&dtrace_lock); 15686 mutex_exit(&dtrace_provider_lock); 15687 15688 if (state != NULL) { 15689 /* 15690 * If we created any anonymous state, set it going now. 15691 */ 15692 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15693 } 15694 15695 return (DDI_SUCCESS); 15696 } 15697 #endif 15698 15699 #if !defined(sun) 15700 #if __FreeBSD_version >= 800039 15701 static void dtrace_dtr(void *); 15702 #endif 15703 #endif 15704 15705 /*ARGSUSED*/ 15706 static int 15707 #if defined(sun) 15708 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15709 #else 15710 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15711 #endif 15712 { 15713 dtrace_state_t *state; 15714 uint32_t priv; 15715 uid_t uid; 15716 zoneid_t zoneid; 15717 15718 #if defined(sun) 15719 if (getminor(*devp) == DTRACEMNRN_HELPER) 15720 return (0); 15721 15722 /* 15723 * If this wasn't an open with the "helper" minor, then it must be 15724 * the "dtrace" minor. 15725 */ 15726 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15727 #else 15728 cred_t *cred_p = NULL; 15729 15730 #if __FreeBSD_version < 800039 15731 /* 15732 * The first minor device is the one that is cloned so there is 15733 * nothing more to do here. 15734 */ 15735 if (dev2unit(dev) == 0) 15736 return 0; 15737 15738 /* 15739 * Devices are cloned, so if the DTrace state has already 15740 * been allocated, that means this device belongs to a 15741 * different client. Each client should open '/dev/dtrace' 15742 * to get a cloned device. 15743 */ 15744 if (dev->si_drv1 != NULL) 15745 return (EBUSY); 15746 #endif 15747 15748 cred_p = dev->si_cred; 15749 #endif 15750 15751 /* 15752 * If no DTRACE_PRIV_* bits are set in the credential, then the 15753 * caller lacks sufficient permission to do anything with DTrace. 15754 */ 15755 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15756 if (priv == DTRACE_PRIV_NONE) { 15757 #if !defined(sun) 15758 #if __FreeBSD_version < 800039 15759 /* Destroy the cloned device. */ 15760 destroy_dev(dev); 15761 #endif 15762 #endif 15763 15764 return (EACCES); 15765 } 15766 15767 /* 15768 * Ask all providers to provide all their probes. 15769 */ 15770 mutex_enter(&dtrace_provider_lock); 15771 dtrace_probe_provide(NULL, NULL); 15772 mutex_exit(&dtrace_provider_lock); 15773 15774 mutex_enter(&cpu_lock); 15775 mutex_enter(&dtrace_lock); 15776 dtrace_opens++; 15777 dtrace_membar_producer(); 15778 15779 #if defined(sun) 15780 /* 15781 * If the kernel debugger is active (that is, if the kernel debugger 15782 * modified text in some way), we won't allow the open. 
15783 */ 15784 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15785 dtrace_opens--; 15786 mutex_exit(&cpu_lock); 15787 mutex_exit(&dtrace_lock); 15788 return (EBUSY); 15789 } 15790 15791 state = dtrace_state_create(devp, cred_p); 15792 #else 15793 state = dtrace_state_create(dev); 15794 #if __FreeBSD_version < 800039 15795 dev->si_drv1 = state; 15796 #else 15797 devfs_set_cdevpriv(state, dtrace_dtr); 15798 #endif 15799 #endif 15800 15801 mutex_exit(&cpu_lock); 15802 15803 if (state == NULL) { 15804 #if defined(sun) 15805 if (--dtrace_opens == 0) 15806 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15807 #else 15808 --dtrace_opens; 15809 #endif 15810 mutex_exit(&dtrace_lock); 15811 #if !defined(sun) 15812 #if __FreeBSD_version < 800039 15813 /* Destroy the cloned device. */ 15814 destroy_dev(dev); 15815 #endif 15816 #endif 15817 return (EAGAIN); 15818 } 15819 15820 mutex_exit(&dtrace_lock); 15821 15822 return (0); 15823 } 15824 15825 /*ARGSUSED*/ 15826 #if defined(sun) 15827 static int 15828 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15829 #elif __FreeBSD_version < 800039 15830 static int 15831 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15832 #else 15833 static void 15834 dtrace_dtr(void *data) 15835 #endif 15836 { 15837 #if defined(sun) 15838 minor_t minor = getminor(dev); 15839 dtrace_state_t *state; 15840 15841 if (minor == DTRACEMNRN_HELPER) 15842 return (0); 15843 15844 state = ddi_get_soft_state(dtrace_softstate, minor); 15845 #else 15846 #if __FreeBSD_version < 800039 15847 dtrace_state_t *state = dev->si_drv1; 15848 15849 /* Check if this is not a cloned device. */ 15850 if (dev2unit(dev) == 0) 15851 return (0); 15852 #else 15853 dtrace_state_t *state = data; 15854 #endif 15855 15856 #endif 15857 15858 mutex_enter(&cpu_lock); 15859 mutex_enter(&dtrace_lock); 15860 15861 if (state != NULL) { 15862 if (state->dts_anon) { 15863 /* 15864 * There is anonymous state. Destroy that first. 15865 */ 15866 ASSERT(dtrace_anon.dta_state == NULL); 15867 dtrace_state_destroy(state->dts_anon); 15868 } 15869 15870 dtrace_state_destroy(state); 15871 15872 #if !defined(sun) 15873 kmem_free(state, 0); 15874 #if __FreeBSD_version < 800039 15875 dev->si_drv1 = NULL; 15876 #endif 15877 #endif 15878 } 15879 15880 ASSERT(dtrace_opens > 0); 15881 #if defined(sun) 15882 if (--dtrace_opens == 0) 15883 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15884 #else 15885 --dtrace_opens; 15886 #endif 15887 15888 mutex_exit(&dtrace_lock); 15889 mutex_exit(&cpu_lock); 15890 15891 #if __FreeBSD_version < 800039 15892 /* Schedule this cloned device to be destroyed. 
*/ 15893 destroy_dev_sched(dev); 15894 #endif 15895 15896 #if defined(sun) || __FreeBSD_version < 800039 15897 return (0); 15898 #endif 15899 } 15900 15901 #if defined(sun) 15902 /*ARGSUSED*/ 15903 static int 15904 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15905 { 15906 int rval; 15907 dof_helper_t help, *dhp = NULL; 15908 15909 switch (cmd) { 15910 case DTRACEHIOC_ADDDOF: 15911 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15912 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15913 return (EFAULT); 15914 } 15915 15916 dhp = &help; 15917 arg = (intptr_t)help.dofhp_dof; 15918 /*FALLTHROUGH*/ 15919 15920 case DTRACEHIOC_ADD: { 15921 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15922 15923 if (dof == NULL) 15924 return (rval); 15925 15926 mutex_enter(&dtrace_lock); 15927 15928 /* 15929 * dtrace_helper_slurp() takes responsibility for the dof -- 15930 * it may free it now or it may save it and free it later. 15931 */ 15932 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15933 *rv = rval; 15934 rval = 0; 15935 } else { 15936 rval = EINVAL; 15937 } 15938 15939 mutex_exit(&dtrace_lock); 15940 return (rval); 15941 } 15942 15943 case DTRACEHIOC_REMOVE: { 15944 mutex_enter(&dtrace_lock); 15945 rval = dtrace_helper_destroygen(arg); 15946 mutex_exit(&dtrace_lock); 15947 15948 return (rval); 15949 } 15950 15951 default: 15952 break; 15953 } 15954 15955 return (ENOTTY); 15956 } 15957 15958 /*ARGSUSED*/ 15959 static int 15960 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15961 { 15962 minor_t minor = getminor(dev); 15963 dtrace_state_t *state; 15964 int rval; 15965 15966 if (minor == DTRACEMNRN_HELPER) 15967 return (dtrace_ioctl_helper(cmd, arg, rv)); 15968 15969 state = ddi_get_soft_state(dtrace_softstate, minor); 15970 15971 if (state->dts_anon) { 15972 ASSERT(dtrace_anon.dta_state == NULL); 15973 state = state->dts_anon; 15974 } 15975 15976 switch (cmd) { 15977 case DTRACEIOC_PROVIDER: { 15978 dtrace_providerdesc_t pvd; 15979 dtrace_provider_t *pvp; 15980 15981 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15982 return (EFAULT); 15983 15984 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15985 mutex_enter(&dtrace_provider_lock); 15986 15987 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15988 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15989 break; 15990 } 15991 15992 mutex_exit(&dtrace_provider_lock); 15993 15994 if (pvp == NULL) 15995 return (ESRCH); 15996 15997 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15998 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15999 16000 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 16001 return (EFAULT); 16002 16003 return (0); 16004 } 16005 16006 case DTRACEIOC_EPROBE: { 16007 dtrace_eprobedesc_t epdesc; 16008 dtrace_ecb_t *ecb; 16009 dtrace_action_t *act; 16010 void *buf; 16011 size_t size; 16012 uintptr_t dest; 16013 int nrecs; 16014 16015 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 16016 return (EFAULT); 16017 16018 mutex_enter(&dtrace_lock); 16019 16020 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 16021 mutex_exit(&dtrace_lock); 16022 return (EINVAL); 16023 } 16024 16025 if (ecb->dte_probe == NULL) { 16026 mutex_exit(&dtrace_lock); 16027 return (EINVAL); 16028 } 16029 16030 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 16031 epdesc.dtepd_uarg = ecb->dte_uarg; 16032 epdesc.dtepd_size = ecb->dte_size; 16033 16034 nrecs = epdesc.dtepd_nrecs; 16035 epdesc.dtepd_nrecs = 0; 16036 for (act = ecb->dte_action; 
act != NULL; act = act->dta_next) { 16037 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 16038 continue; 16039 16040 epdesc.dtepd_nrecs++; 16041 } 16042 16043 /* 16044 * Now that we have the size, we need to allocate a temporary 16045 * buffer in which to store the complete description. We need 16046 * the temporary buffer to be able to drop dtrace_lock() 16047 * across the copyout(), below. 16048 */ 16049 size = sizeof (dtrace_eprobedesc_t) + 16050 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 16051 16052 buf = kmem_alloc(size, KM_SLEEP); 16053 dest = (uintptr_t)buf; 16054 16055 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 16056 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 16057 16058 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 16059 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 16060 continue; 16061 16062 if (nrecs-- == 0) 16063 break; 16064 16065 bcopy(&act->dta_rec, (void *)dest, 16066 sizeof (dtrace_recdesc_t)); 16067 dest += sizeof (dtrace_recdesc_t); 16068 } 16069 16070 mutex_exit(&dtrace_lock); 16071 16072 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16073 kmem_free(buf, size); 16074 return (EFAULT); 16075 } 16076 16077 kmem_free(buf, size); 16078 return (0); 16079 } 16080 16081 case DTRACEIOC_AGGDESC: { 16082 dtrace_aggdesc_t aggdesc; 16083 dtrace_action_t *act; 16084 dtrace_aggregation_t *agg; 16085 int nrecs; 16086 uint32_t offs; 16087 dtrace_recdesc_t *lrec; 16088 void *buf; 16089 size_t size; 16090 uintptr_t dest; 16091 16092 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 16093 return (EFAULT); 16094 16095 mutex_enter(&dtrace_lock); 16096 16097 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 16098 mutex_exit(&dtrace_lock); 16099 return (EINVAL); 16100 } 16101 16102 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 16103 16104 nrecs = aggdesc.dtagd_nrecs; 16105 aggdesc.dtagd_nrecs = 0; 16106 16107 offs = agg->dtag_base; 16108 lrec = &agg->dtag_action.dta_rec; 16109 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 16110 16111 for (act = agg->dtag_first; ; act = act->dta_next) { 16112 ASSERT(act->dta_intuple || 16113 DTRACEACT_ISAGG(act->dta_kind)); 16114 16115 /* 16116 * If this action has a record size of zero, it 16117 * denotes an argument to the aggregating action. 16118 * Because the presence of this record doesn't (or 16119 * shouldn't) affect the way the data is interpreted, 16120 * we don't copy it out to save user-level the 16121 * confusion of dealing with a zero-length record. 16122 */ 16123 if (act->dta_rec.dtrd_size == 0) { 16124 ASSERT(agg->dtag_hasarg); 16125 continue; 16126 } 16127 16128 aggdesc.dtagd_nrecs++; 16129 16130 if (act == &agg->dtag_action) 16131 break; 16132 } 16133 16134 /* 16135 * Now that we have the size, we need to allocate a temporary 16136 * buffer in which to store the complete description. We need 16137 * the temporary buffer to be able to drop dtrace_lock() 16138 * across the copyout(), below. 16139 */ 16140 size = sizeof (dtrace_aggdesc_t) + 16141 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16142 16143 buf = kmem_alloc(size, KM_SLEEP); 16144 dest = (uintptr_t)buf; 16145 16146 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16147 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16148 16149 for (act = agg->dtag_first; ; act = act->dta_next) { 16150 dtrace_recdesc_t rec = act->dta_rec; 16151 16152 /* 16153 * See the comment in the above loop for why we pass 16154 * over zero-length records. 
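 *
 * Both DTRACEIOC_EPROBE and this case use the same two-pass export:
 * size the description under dtrace_lock, build it into a private
 * buffer, then drop the lock before the copyout() (which may fault or
 * sleep).  The skeleton, as a userland sketch (rec_t and copy_out()
 * are illustrative stand-ins):
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	typedef struct rec { int r_size; } rec_t;
 *
 *	extern int copy_out(const void *kbuf, void *ubuf, size_t len);
 *
 *	static int
 *	export_recs(const rec_t *recs, int nrecs, void *ubuf)
 *	{
 *		size_t size = (size_t)nrecs * sizeof (rec_t);
 *		void *buf = malloc(size);
 *		int rv;
 *
 *		if (buf == NULL)
 *			return (-1);
 *		memcpy(buf, recs, size);	// snapshot under the lock
 *		// ... drop the lock here; 'buf' is private ...
 *		rv = copy_out(buf, ubuf, size);
 *		free(buf);
 *		return (rv);
 *	}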

	case DTRACEIOC_AGGDESC: {
		dtrace_aggdesc_t aggdesc;
		dtrace_action_t *act;
		dtrace_aggregation_t *agg;
		int nrecs;
		uint32_t offs;
		dtrace_recdesc_t *lrec;
		void *buf;
		size_t size;
		uintptr_t dest;

		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;

		nrecs = aggdesc.dtagd_nrecs;
		aggdesc.dtagd_nrecs = 0;

		offs = agg->dtag_base;
		lrec = &agg->dtag_action.dta_rec;
		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;

		for (act = agg->dtag_first; ; act = act->dta_next) {
			ASSERT(act->dta_intuple ||
			    DTRACEACT_ISAGG(act->dta_kind));

			/*
			 * If this action has a record size of zero, it
			 * denotes an argument to the aggregating action.
			 * Because the presence of this record doesn't (or
			 * shouldn't) affect the way the data is interpreted,
			 * we don't copy it out, to spare user level the
			 * confusion of dealing with a zero-length record.
			 */
			if (act->dta_rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			aggdesc.dtagd_nrecs++;

			if (act == &agg->dtag_action)
				break;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_aggdesc_t) +
		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);

		for (act = agg->dtag_first; ; act = act->dta_next) {
			dtrace_recdesc_t rec = act->dta_rec;

			/*
			 * See the comment in the above loop for why we pass
			 * over zero-length records.
			 */
			if (rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			if (nrecs-- == 0)
				break;

			rec.dtrd_offset -= offs;
			bcopy(&rec, (void *)dest, sizeof (rec));
			dest += sizeof (dtrace_recdesc_t);

			if (act == &agg->dtag_action)
				break;
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_ENABLE: {
		dof_hdr_t *dof;
		dtrace_enabling_t *enab = NULL;
		dtrace_vstate_t *vstate;
		int err = 0;

		*rv = 0;

		/*
		 * If a NULL argument has been passed, we take this as our
		 * cue to reevaluate our enablings.
		 */
		if (arg == NULL) {
			dtrace_enabling_matchall();

			return (0);
		}

		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
			return (rval);

		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_lock);
		vstate = &state->dts_vstate;

		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EBUSY);
		}

		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EINVAL);
		}

		if ((rval = dtrace_dof_options(dof, state)) != 0) {
			dtrace_enabling_destroy(enab);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (rval);
		}

		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
			err = dtrace_enabling_retain(enab);
		} else {
			dtrace_enabling_destroy(enab);
		}

		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		dtrace_dof_destroy(dof);

		return (err);
	}

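	/*
	 * Consumers normally reach the DTRACEIOC_ENABLE case above through
	 * libdtrace, which packages the compiled enablings as DOF and
	 * issues (roughly):
	 *
	 *	(void) ioctl(fd, DTRACEIOC_ENABLE, dof);
	 *
	 * where dof points at the in-memory DOF image.  Note also the lock
	 * ordering above:  cpu_lock is acquired before dtrace_lock, here as
	 * elsewhere in the framework.
	 */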

	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}

	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

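	/*
	 * Because the DTRACEIOC_PROBES case returns the first probe with an
	 * ID greater than or equal to dtpd_id, a consumer can walk every
	 * probe visible to it by bumping dtpd_id on each pass -- an
	 * illustrative sketch, much as libdtrace's probe iteration does:
	 *
	 *	bzero(&desc, sizeof (desc));
	 *
	 *	while (ioctl(fd, DTRACEIOC_PROBES, &desc) == 0)
	 *		desc.dtpd_id++;
	 *
	 * Each successful call yields one probe description; ESRCH denotes
	 * that the probe namespace has been exhausted.
	 */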

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}

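	/*
	 * Because the copyout in the DTRACEIOC_DOFGET case is clamped to
	 * the caller's dofh_loadsz, state DOF of unknown size can be
	 * retrieved in two passes -- an illustrative sketch:
	 *
	 *	dof_hdr_t hdr, *dof;
	 *
	 *	hdr.dofh_loadsz = sizeof (hdr);
	 *	(void) ioctl(fd, DTRACEIOC_DOFGET, &hdr);
	 *
	 *	dof = malloc(hdr.dofh_loadsz);
	 *	dof->dofh_loadsz = hdr.dofh_loadsz;
	 *	(void) ioctl(fd, DTRACEIOC_DOFGET, dof);
	 *
	 * The first call retrieves only the header (and with it the true
	 * dofh_loadsz); the second retrieves the entire DOF.
	 */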
16501 */ 16502 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 16503 dtrace_buffer_polish(buf); 16504 sz = buf->dtb_size; 16505 } 16506 16507 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 16508 mutex_exit(&dtrace_lock); 16509 return (EFAULT); 16510 } 16511 16512 desc.dtbd_size = sz; 16513 desc.dtbd_drops = buf->dtb_drops; 16514 desc.dtbd_errors = buf->dtb_errors; 16515 desc.dtbd_oldest = buf->dtb_xamot_offset; 16516 desc.dtbd_timestamp = dtrace_gethrtime(); 16517 16518 mutex_exit(&dtrace_lock); 16519 16520 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16521 return (EFAULT); 16522 16523 buf->dtb_flags |= DTRACEBUF_CONSUMED; 16524 16525 return (0); 16526 } 16527 16528 if (buf->dtb_tomax == NULL) { 16529 ASSERT(buf->dtb_xamot == NULL); 16530 mutex_exit(&dtrace_lock); 16531 return (ENOENT); 16532 } 16533 16534 cached = buf->dtb_tomax; 16535 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 16536 16537 dtrace_xcall(desc.dtbd_cpu, 16538 (dtrace_xcall_t)dtrace_buffer_switch, buf); 16539 16540 state->dts_errors += buf->dtb_xamot_errors; 16541 16542 /* 16543 * If the buffers did not actually switch, then the cross call 16544 * did not take place -- presumably because the given CPU is 16545 * not in the ready set. If this is the case, we'll return 16546 * ENOENT. 16547 */ 16548 if (buf->dtb_tomax == cached) { 16549 ASSERT(buf->dtb_xamot != cached); 16550 mutex_exit(&dtrace_lock); 16551 return (ENOENT); 16552 } 16553 16554 ASSERT(cached == buf->dtb_xamot); 16555 16556 /* 16557 * We have our snapshot; now copy it out. 16558 */ 16559 if (copyout(buf->dtb_xamot, desc.dtbd_data, 16560 buf->dtb_xamot_offset) != 0) { 16561 mutex_exit(&dtrace_lock); 16562 return (EFAULT); 16563 } 16564 16565 desc.dtbd_size = buf->dtb_xamot_offset; 16566 desc.dtbd_drops = buf->dtb_xamot_drops; 16567 desc.dtbd_errors = buf->dtb_xamot_errors; 16568 desc.dtbd_oldest = 0; 16569 desc.dtbd_timestamp = buf->dtb_switched; 16570 16571 mutex_exit(&dtrace_lock); 16572 16573 /* 16574 * Finally, copy out the buffer description. 16575 */ 16576 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16577 return (EFAULT); 16578 16579 return (0); 16580 } 16581 16582 case DTRACEIOC_CONF: { 16583 dtrace_conf_t conf; 16584 16585 bzero(&conf, sizeof (conf)); 16586 conf.dtc_difversion = DIF_VERSION; 16587 conf.dtc_difintregs = DIF_DIR_NREGS; 16588 conf.dtc_diftupregs = DIF_DTR_NREGS; 16589 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 16590 16591 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 16592 return (EFAULT); 16593 16594 return (0); 16595 } 16596 16597 case DTRACEIOC_STATUS: { 16598 dtrace_status_t stat; 16599 dtrace_dstate_t *dstate; 16600 int i, j; 16601 uint64_t nerrs; 16602 16603 /* 16604 * See the comment in dtrace_state_deadman() for the reason 16605 * for setting dts_laststatus to INT64_MAX before setting 16606 * it to the correct value. 
16607 */ 16608 state->dts_laststatus = INT64_MAX; 16609 dtrace_membar_producer(); 16610 state->dts_laststatus = dtrace_gethrtime(); 16611 16612 bzero(&stat, sizeof (stat)); 16613 16614 mutex_enter(&dtrace_lock); 16615 16616 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 16617 mutex_exit(&dtrace_lock); 16618 return (ENOENT); 16619 } 16620 16621 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 16622 stat.dtst_exiting = 1; 16623 16624 nerrs = state->dts_errors; 16625 dstate = &state->dts_vstate.dtvs_dynvars; 16626 16627 for (i = 0; i < NCPU; i++) { 16628 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 16629 16630 stat.dtst_dyndrops += dcpu->dtdsc_drops; 16631 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 16632 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 16633 16634 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 16635 stat.dtst_filled++; 16636 16637 nerrs += state->dts_buffer[i].dtb_errors; 16638 16639 for (j = 0; j < state->dts_nspeculations; j++) { 16640 dtrace_speculation_t *spec; 16641 dtrace_buffer_t *buf; 16642 16643 spec = &state->dts_speculations[j]; 16644 buf = &spec->dtsp_buffer[i]; 16645 stat.dtst_specdrops += buf->dtb_xamot_drops; 16646 } 16647 } 16648 16649 stat.dtst_specdrops_busy = state->dts_speculations_busy; 16650 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 16651 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 16652 stat.dtst_dblerrors = state->dts_dblerrors; 16653 stat.dtst_killed = 16654 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 16655 stat.dtst_errors = nerrs; 16656 16657 mutex_exit(&dtrace_lock); 16658 16659 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 16660 return (EFAULT); 16661 16662 return (0); 16663 } 16664 16665 case DTRACEIOC_FORMAT: { 16666 dtrace_fmtdesc_t fmt; 16667 char *str; 16668 int len; 16669 16670 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 16671 return (EFAULT); 16672 16673 mutex_enter(&dtrace_lock); 16674 16675 if (fmt.dtfd_format == 0 || 16676 fmt.dtfd_format > state->dts_nformats) { 16677 mutex_exit(&dtrace_lock); 16678 return (EINVAL); 16679 } 16680 16681 /* 16682 * Format strings are allocated contiguously and they are 16683 * never freed; if a format index is less than the number 16684 * of formats, we can assert that the format map is non-NULL 16685 * and that the format for the specified index is non-NULL. 
16686 */ 16687 ASSERT(state->dts_formats != NULL); 16688 str = state->dts_formats[fmt.dtfd_format - 1]; 16689 ASSERT(str != NULL); 16690 16691 len = strlen(str) + 1; 16692 16693 if (len > fmt.dtfd_length) { 16694 fmt.dtfd_length = len; 16695 16696 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 16697 mutex_exit(&dtrace_lock); 16698 return (EINVAL); 16699 } 16700 } else { 16701 if (copyout(str, fmt.dtfd_string, len) != 0) { 16702 mutex_exit(&dtrace_lock); 16703 return (EINVAL); 16704 } 16705 } 16706 16707 mutex_exit(&dtrace_lock); 16708 return (0); 16709 } 16710 16711 default: 16712 break; 16713 } 16714 16715 return (ENOTTY); 16716 } 16717 16718 /*ARGSUSED*/ 16719 static int 16720 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 16721 { 16722 dtrace_state_t *state; 16723 16724 switch (cmd) { 16725 case DDI_DETACH: 16726 break; 16727 16728 case DDI_SUSPEND: 16729 return (DDI_SUCCESS); 16730 16731 default: 16732 return (DDI_FAILURE); 16733 } 16734 16735 mutex_enter(&cpu_lock); 16736 mutex_enter(&dtrace_provider_lock); 16737 mutex_enter(&dtrace_lock); 16738 16739 ASSERT(dtrace_opens == 0); 16740 16741 if (dtrace_helpers > 0) { 16742 mutex_exit(&dtrace_provider_lock); 16743 mutex_exit(&dtrace_lock); 16744 mutex_exit(&cpu_lock); 16745 return (DDI_FAILURE); 16746 } 16747 16748 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 16749 mutex_exit(&dtrace_provider_lock); 16750 mutex_exit(&dtrace_lock); 16751 mutex_exit(&cpu_lock); 16752 return (DDI_FAILURE); 16753 } 16754 16755 dtrace_provider = NULL; 16756 16757 if ((state = dtrace_anon_grab()) != NULL) { 16758 /* 16759 * If there were ECBs on this state, the provider should 16760 * have not been allowed to detach; assert that there is 16761 * none. 16762 */ 16763 ASSERT(state->dts_necbs == 0); 16764 dtrace_state_destroy(state); 16765 16766 /* 16767 * If we're being detached with anonymous state, we need to 16768 * indicate to the kernel debugger that DTrace is now inactive. 
16769 */ 16770 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16771 } 16772 16773 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 16774 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16775 dtrace_cpu_init = NULL; 16776 dtrace_helpers_cleanup = NULL; 16777 dtrace_helpers_fork = NULL; 16778 dtrace_cpustart_init = NULL; 16779 dtrace_cpustart_fini = NULL; 16780 dtrace_debugger_init = NULL; 16781 dtrace_debugger_fini = NULL; 16782 dtrace_modload = NULL; 16783 dtrace_modunload = NULL; 16784 16785 mutex_exit(&cpu_lock); 16786 16787 if (dtrace_helptrace_enabled) { 16788 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 16789 dtrace_helptrace_buffer = NULL; 16790 } 16791 16792 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 16793 dtrace_probes = NULL; 16794 dtrace_nprobes = 0; 16795 16796 dtrace_hash_destroy(dtrace_bymod); 16797 dtrace_hash_destroy(dtrace_byfunc); 16798 dtrace_hash_destroy(dtrace_byname); 16799 dtrace_bymod = NULL; 16800 dtrace_byfunc = NULL; 16801 dtrace_byname = NULL; 16802 16803 kmem_cache_destroy(dtrace_state_cache); 16804 vmem_destroy(dtrace_minor); 16805 vmem_destroy(dtrace_arena); 16806 16807 if (dtrace_toxrange != NULL) { 16808 kmem_free(dtrace_toxrange, 16809 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 16810 dtrace_toxrange = NULL; 16811 dtrace_toxranges = 0; 16812 dtrace_toxranges_max = 0; 16813 } 16814 16815 ddi_remove_minor_node(dtrace_devi, NULL); 16816 dtrace_devi = NULL; 16817 16818 ddi_soft_state_fini(&dtrace_softstate); 16819 16820 ASSERT(dtrace_vtime_references == 0); 16821 ASSERT(dtrace_opens == 0); 16822 ASSERT(dtrace_retained == NULL); 16823 16824 mutex_exit(&dtrace_lock); 16825 mutex_exit(&dtrace_provider_lock); 16826 16827 /* 16828 * We don't destroy the task queue until after we have dropped our 16829 * locks (taskq_destroy() may block on running tasks). To prevent 16830 * attempting to do work after we have effectively detached but before 16831 * the task queue has been destroyed, all tasks dispatched via the 16832 * task queue must check that DTrace is still attached before 16833 * performing any operation. 
16834 */ 16835 taskq_destroy(dtrace_taskq); 16836 dtrace_taskq = NULL; 16837 16838 return (DDI_SUCCESS); 16839 } 16840 #endif 16841 16842 #if defined(sun) 16843 /*ARGSUSED*/ 16844 static int 16845 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 16846 { 16847 int error; 16848 16849 switch (infocmd) { 16850 case DDI_INFO_DEVT2DEVINFO: 16851 *result = (void *)dtrace_devi; 16852 error = DDI_SUCCESS; 16853 break; 16854 case DDI_INFO_DEVT2INSTANCE: 16855 *result = (void *)0; 16856 error = DDI_SUCCESS; 16857 break; 16858 default: 16859 error = DDI_FAILURE; 16860 } 16861 return (error); 16862 } 16863 #endif 16864 16865 #if defined(sun) 16866 static struct cb_ops dtrace_cb_ops = { 16867 dtrace_open, /* open */ 16868 dtrace_close, /* close */ 16869 nulldev, /* strategy */ 16870 nulldev, /* print */ 16871 nodev, /* dump */ 16872 nodev, /* read */ 16873 nodev, /* write */ 16874 dtrace_ioctl, /* ioctl */ 16875 nodev, /* devmap */ 16876 nodev, /* mmap */ 16877 nodev, /* segmap */ 16878 nochpoll, /* poll */ 16879 ddi_prop_op, /* cb_prop_op */ 16880 0, /* streamtab */ 16881 D_NEW | D_MP /* Driver compatibility flag */ 16882 }; 16883 16884 static struct dev_ops dtrace_ops = { 16885 DEVO_REV, /* devo_rev */ 16886 0, /* refcnt */ 16887 dtrace_info, /* get_dev_info */ 16888 nulldev, /* identify */ 16889 nulldev, /* probe */ 16890 dtrace_attach, /* attach */ 16891 dtrace_detach, /* detach */ 16892 nodev, /* reset */ 16893 &dtrace_cb_ops, /* driver operations */ 16894 NULL, /* bus operations */ 16895 nodev /* dev power */ 16896 }; 16897 16898 static struct modldrv modldrv = { 16899 &mod_driverops, /* module type (this is a pseudo driver) */ 16900 "Dynamic Tracing", /* name of module */ 16901 &dtrace_ops, /* driver ops */ 16902 }; 16903 16904 static struct modlinkage modlinkage = { 16905 MODREV_1, 16906 (void *)&modldrv, 16907 NULL 16908 }; 16909 16910 int 16911 _init(void) 16912 { 16913 return (mod_install(&modlinkage)); 16914 } 16915 16916 int 16917 _info(struct modinfo *modinfop) 16918 { 16919 return (mod_info(&modlinkage, modinfop)); 16920 } 16921 16922 int 16923 _fini(void) 16924 { 16925 return (mod_remove(&modlinkage)); 16926 } 16927 #else 16928 16929 static d_ioctl_t dtrace_ioctl; 16930 static d_ioctl_t dtrace_ioctl_helper; 16931 static void dtrace_load(void *); 16932 static int dtrace_unload(void); 16933 #if __FreeBSD_version < 800039 16934 static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **); 16935 static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */ 16936 static eventhandler_tag eh_tag; /* Event handler tag. 
#else

static d_ioctl_t dtrace_ioctl;
static d_ioctl_t dtrace_ioctl_helper;
static void dtrace_load(void *);
static int dtrace_unload(void);
#if __FreeBSD_version < 800039
static void dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs *dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag eh_tag;		/* Event handler tag. */
#else
static struct cdev *dtrace_dev;
static struct cdev *helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
#if __FreeBSD_version < 800039
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
#endif
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif