/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#ifndef illumos
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#ifdef illumos
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#ifdef illumos
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#ifdef illumos
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#ifdef illumos
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#ifdef illumos
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#ifdef illumos
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "strtolctype.h"

/* FreeBSD includes: */
#ifndef illumos
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/dtrace_bsd.h>

#include <netinet/in.h>

#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
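
/*
 * As a purely illustrative example (the variable and value here are chosen
 * for exposition; any setting should be validated against local
 * requirements), the DOF size limit declared below could be raised with a
 * second such line:
 *
 *	set dtrace:dtrace_dof_maxsize = 0x1000000
 */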
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (8 * 1024 * 1024);
size_t		dtrace_statvar_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = MSEC2NSEC(500);		/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
#ifndef illumos
int		dtrace_memstr_max = 4096;
#endif

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#ifdef illumos
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#ifdef illumos
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static int		dtrace_getf;		/* number of unpriv getf()s */
#ifdef illumos
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_genid_t	dtrace_retained_gen;	/* current retained enab gen */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
static int		dtrace_dynvar_failclean; /* dynvars failed to clean */
#ifndef illumos
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_try_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
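
/*
 * An illustrative sketch (not part of the build) of the ordering described
 * above: a hypothetical framework path that needed all three locks would
 * acquire and release them as follows.
 *
 *	mutex_enter(&dtrace_meta_lock);		(first:  meta provider state)
 *	mutex_enter(&dtrace_provider_lock);	(second: provider state)
 *	mutex_enter(&dtrace_lock);		(last:   core DTrace state)
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&dtrace_meta_lock);
 *
 * cpu_lock, when needed, would be taken before dtrace_provider_lock and
 * dtrace_lock (but after dtrace_meta_lock), per the rules above.
 */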

#ifndef illumos
/* XXX FreeBSD hacks. */
#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#define	ttoproc(_a)	((_a)->td_proc)
#define	crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_DECL(_debug_dtrace);
SYSCTL_DECL(_kern_dtrace);
#endif

#ifdef illumos
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 *
 * These variables should be set dynamically to enable helper tracing.  The
 * only variables that should be set are dtrace_helptrace_enable (which should
 * be set to a non-zero value to allocate helper tracing buffers on the next
 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
 * non-zero value to deallocate helper tracing buffers on the next close of
 * /dev/dtrace).  When (and only when) helper tracing is disabled, the
 * buffer size may also be set via dtrace_helptrace_bufsize.
 */
int			dtrace_helptrace_enable = 0;
int			dtrace_helptrace_disable = 0;
int			dtrace_helptrace_bufsize = 16 * 1024 * 1024;
uint32_t		dtrace_helptrace_nlocals;
static dtrace_helptrace_t *dtrace_helptrace_buffer;
static uint32_t		dtrace_helptrace_next = 0;
static int		dtrace_helptrace_wrapped = 0;

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *	[ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#ifdef illumos
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
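
/*
 * An illustrative expansion (assumed values, for exposition only): with a
 * thread ID of 0x1234 and an interrupt value of 2, DTRACE_TLS_THRKEY()
 * computes
 *
 *	((0x1234 + DIF_VARIABLE_MAX) & (((uint64_t)1 << 61) - 1)) |
 *	    ((uint64_t)2 << 61)
 *
 * that is, bits 0-60 carry the (offset) thread identifier and bits 61-63
 * carry the interrupt nesting value -- keeping the tls-key disjoint from
 * every variable identifier, as described above.
 */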

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)			\
	if (addr & (size - 1)) {				\
		*flags |= CPU_DTRACE_BADALIGN;			\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;	\
		return (0);					\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)		\
	((testaddr) - (uintptr_t)(baseaddr) < (basesz) &&		\
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) &&	\
	(testaddr) + (testsz) >= (testaddr))
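
/*
 * A worked example of the wrap that the final clause guards against (the
 * values are illustrative): with testaddr == 0xfffffffffffffff0 and
 * testsz == 0x20, the sum testaddr + testsz wraps around to 0x10, so a
 * naive "start >= base && end <= base + size" test could falsely pass.
 * The third clause, (testaddr) + (testsz) >= (testaddr), fails exactly
 * when such a wrap occurs; similarly, the first clause is phrased as a
 * subtraction so that a testaddr below baseaddr underflows to a huge
 * value and fails the comparison against basesz.
 */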

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))
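
/*
 * An illustrative evaluation (assumed scratch state, for exposition): with
 * dtms_scratch_base == 0x1000, dtms_scratch_size == 0x100 and
 * dtms_scratch_ptr advanced to 0x10c0, DTRACE_INSCRATCH(mstate, 0x40)
 * computes 0x1100 - 0x10c0 >= 0x40 and succeeds, while a request for 0x41
 * bytes fails.  Keeping alloc_sz alone on the righthand side is what makes
 * the arithmetic immune to overflow.
 */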

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
#ifdef __FreeBSD__
	vpanic(format, alist);
#else
	dtrace_vpanic(format, alist);
#endif
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
/* BEGIN CSTYLED */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)
/* END CSTYLED */
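
/*
 * An illustrative sketch (not part of the build) of how probe-context code
 * is expected to consume these accessors: rather than dereferencing an
 * untrusted pointer, a hypothetical helper would walk it a byte at a time
 * through dtrace_load8(), e.g.
 *
 *	size_t
 *	example_strnlen(uintptr_t addr, size_t lim)
 *	{
 *		size_t len;
 *
 *		for (len = 0; len < lim; len++) {
 *			if (dtrace_load8(addr + len) == '\0')
 *				break;
 *		}
 *
 *		return (len);
 *	}
 *
 * A toxic or faulting address then sets CPU_DTRACE_BADADDR (or is absorbed
 * under CPU_DTRACE_NOFAULT) instead of panicking the kernel; compare the
 * real dtrace_strlen() below.
 */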

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;
	size_t maxglobalsize, maxlocalsize;

	if (nsvars == 0)
		return (0);

	maxglobalsize = dtrace_statvar_maxsize;
	maxlocalsize = (maxglobalsize + sizeof (uint64_t)) * NCPU;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];
		uint8_t scope;
		size_t size;

		if (svar == NULL || (size = svar->dtsv_size) == 0)
			continue;

		scope = svar->dtsv_var.dtdv_scope;

		/*
		 * We verify that our size is valid in the spirit of providing
		 * defense in depth:  we want to prevent attackers from using
		 * DTrace to escalate an orthogonal kernel heap corruption bug
		 * into the ability to store to arbitrary locations in memory.
		 */
		VERIFY((scope == DIFV_SCOPE_GLOBAL && size < maxglobalsize) ||
		    (scope == DIFV_SCOPE_LOCAL && size < maxlocalsize));

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;
		dtrace_dynvar_t *dvar;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 *	(4) Not be in the tuple space of a dynamic variable
		 *
		 * (The chunk layout is sketched below.)
		 */
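		/*
		 * An illustrative view of a single chunk (a sketch of the
		 * conditions above, not a struct definition):
		 *
		 *	base + N * chunksize
		 *	+--------------------------+
		 *	| dtrace_dynvar_t          |  <- (2): offsets below
		 *	|   (includes first key)   |     this are metadata
		 *	+--------------------------+
		 *	| remaining tuple keys     |  <- (4): still metadata
		 *	+--------------------------+
		 *	| variable data            |  <- stores may land here,
		 *	+--------------------------+     provided they do not
		 *	base + (N + 1) * chunksize      cross the boundary (3)
		 */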
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);

		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
			return (0);

		if (chunkoffs < sizeof (dtrace_dynvar_t) +
		    ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
	file_t *fp;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	if (vstate->dtvs_state != NULL &&
	    dtrace_priv_proc(vstate->dtvs_state)) {
		proc_t *p;

		/*
		 * When we have privileges to the current process, there are
		 * several context-related kernel structures that are safe to
		 * read, even absent the privilege to read from kernel memory.
		 * These reads are safe because these structures contain only
		 * state that (1) we're permitted to read, (2) is harmless or
		 * (3) contains pointers to additional kernel state that we're
		 * not permitted to read (and as such, do not present an
		 * opportunity for privilege escalation).  Finally (and
		 * critically), because of the nature of their relation with
		 * the current thread context, the memory associated with these
		 * structures cannot change over the duration of probe context,
		 * and it is therefore impossible for this memory to be
		 * deallocated and reallocated as something else while it's
		 * being operated upon.
		 */
		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
			return (1);

		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
		    sz, curthread->t_procp, sizeof (proc_t))) {
			return (1);
		}

		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cred, sizeof (cred_t))) {
			return (1);
		}

#ifdef illumos
		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
			return (1);
		}

		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
			return (1);
		}
#endif
	}

	if ((fp = mstate->dtms_getf) != NULL) {
		uintptr_t psz = sizeof (void *);
		vnode_t *vp;
		vnodeops_t *op;

		/*
		 * When getf() returns a file_t, the enabling is implicitly
		 * granted the (transient) right to read the returned file_t
		 * as well as the v_path and v_op->vnop_name of the underlying
		 * vnode.  These accesses are allowed after a successful
		 * getf() because the members that they refer to cannot change
		 * once set -- and the barrier logic in the kernel's closef()
		 * path assures that the file_t and its referenced vnode_t
		 * cannot themselves be stale (that is, it is impossible for
		 * either dtms_getf itself or its f_vnode member to reference
		 * freed memory).
		 */
		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t)))
			return (1);

		if ((vp = fp->f_vnode) != NULL) {
#ifdef illumos
			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz))
				return (1);
			if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz,
			    vp->v_path, strlen(vp->v_path) + 1)) {
				return (1);
			}
#endif

			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz))
				return (1);

#ifdef illumos
			if ((op = vp->v_op) != NULL &&
			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
				return (1);
			}

			if (op != NULL && op->vnop_name != NULL &&
			    DTRACE_INRANGE(addr, sz, op->vnop_name,
			    strlen(op->vnop_name) + 1)) {
				return (1);
			}
#endif
		}
	}

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Convert a string to a signed integer using safe loads.
 *
 * NOTE:  This function uses various macros from strtolctype.h to manipulate
 * digit values, etc -- these have all been checked to ensure they make
 * no additional function calls.
 */
static int64_t
dtrace_strtoll(char *input, int base, size_t limit)
{
	uintptr_t pos = (uintptr_t)input;
	int64_t val = 0;
	int x;
	boolean_t neg = B_FALSE;
	char c, cc, ccc;
	uintptr_t end = pos + limit;

	/*
	 * Consume any whitespace preceding digits.
	 */
	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
		pos++;

	/*
	 * Handle an explicit sign if one is present.
	 */
	if (c == '-' || c == '+') {
		if (c == '-')
			neg = B_TRUE;
		c = dtrace_load8(++pos);
	}

	/*
	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
	 * if present.
	 */
	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
		pos += 2;
		c = ccc;
	}

	/*
	 * Read in contiguous digits until the first non-digit character.
	 */
	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
	    c = dtrace_load8(++pos))
		val = val * base + x;

	return (neg ? -val : val);
}
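
/*
 * Illustrative results (assumed inputs, for exposition only): given base 10,
 * dtrace_strtoll(" -42x", 10, 8) consumes the leading whitespace and sign,
 * accumulates 4 and 2, stops at 'x' and returns -42; given base 16,
 * dtrace_strtoll("0x1F", 16, 8) skips the "0x" prefix and returns 31.
 */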

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
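
/*
 * A worked example (values chosen for exposition): with
 * factor1 == 2^32 + 3 and factor2 == 2^32 + 5, the split yields hi1 == 1,
 * lo1 == 3, hi2 == 1 and lo2 == 5.  product starts as { 15, 1 }, the cross
 * terms hi1 * lo2 == 5 and hi2 * lo1 == 3 are each shifted left 32 bits and
 * added in, and the result is 2^64 + 8 * 2^32 + 15 -- exactly
 * (2^32 + 3) * (2^32 + 5).
 */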

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#ifdef illumos
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Determine if the dte_cond of the specified ECB allows for processing of
 * the current probe to continue.  Note that this routine may allow continued
 * processing, but with access(es) stripped from the mstate's dtms_access
 * field.
 */
static int
dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
    dtrace_ecb_t *ecb)
{
	dtrace_probe_t *probe = ecb->dte_probe;
	dtrace_provider_t *prov = probe->dtpr_provider;
	dtrace_pops_t *pops = &prov->dtpv_pops;
	int mode = DTRACE_MODE_NOPRIV_DROP;

	ASSERT(ecb->dte_cond);

#ifdef illumos
	if (pops->dtps_mode != NULL) {
		mode = pops->dtps_mode(prov->dtpv_arg,
		    probe->dtpr_id, probe->dtpr_arg);

		ASSERT((mode & DTRACE_MODE_USER) ||
		    (mode & DTRACE_MODE_KERNEL));
		ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
		    (mode & DTRACE_MODE_NOPRIV_DROP));
	}

	/*
	 * If the dte_cond bits indicate that this consumer is only allowed to
	 * see user-mode firings of this probe, call the provider's dtps_mode()
	 * entry point to check that the probe was fired while in a user
	 * context.  If that's not the case, use the policy specified by the
	 * provider to determine if we drop the probe or merely restrict
	 * operation.
	 */
	if (ecb->dte_cond & DTRACE_COND_USERMODE) {
		ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);

		if (!(mode & DTRACE_MODE_USER)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
		}
	}
#endif

	/*
	 * This is more subtle than it looks.  We have to be absolutely certain
	 * that CRED() isn't going to change out from under us so it's only
	 * legit to examine that structure if we're in constrained situations.
	 * Currently, the only times we'll perform this check are when a
	 * non-super-user has enabled the profile or syscall providers --
	 * providers that allow visibility of all processes.  For the profile
	 * case, the check above will ensure that we're examining a user
	 * context.
	 */
	if (ecb->dte_cond & DTRACE_COND_OWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;
		proc_t *proc;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_uid != cr->cr_uid ||
		    s_cr->cr_uid != cr->cr_ruid ||
		    s_cr->cr_uid != cr->cr_suid ||
		    s_cr->cr_gid != cr->cr_gid ||
		    s_cr->cr_gid != cr->cr_rgid ||
		    s_cr->cr_gid != cr->cr_sgid ||
		    (proc = ttoproc(curthread)) == NULL ||
		    (proc->p_flag & SNOCD)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

#ifdef illumos
			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
#endif
		}
	}

#ifdef illumos
	/*
	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
	 * in our zone, check to see if our mode policy is to restrict rather
	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
	 * and DTRACE_ACCESS_ARGS.
	 */
	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &=
			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
		}
	}
#endif

	return (1);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
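/*
 * An illustrative sketch of a chunk's journey through the lists managed
 * here (a simplification of the mechanism detailed in <sys/dtrace_impl.h>):
 *
 *	hash chain --(dealloc in probe context)--> dirty list
 *	dirty list --(this routine moves it aside)--> rinsing list
 *	rinsing list --(after dtrace_sync())--> clean list
 *
 * The dtrace_sync() cross calls are what guarantee that no CPU can still
 * hold a probe-context pointer into a list at the moment it is promoted.
 */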
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	dtrace_dynvar_t **rinsep;
	int i, j, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];
		rinsep = &dcpu->dtdsc_rinsing;

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		if (dcpu->dtdsc_rinsing != NULL) {
			/*
			 * If the rinsing list is non-NULL, then it is because
			 * this CPU was selected to accept another CPU's
			 * dirty list -- and since that time, dirty buffers
			 * have accumulated.  This is a highly unlikely
			 * condition, but we choose to ignore the dirty
			 * buffers -- they'll be picked up in a future cleanse.
			 */
			continue;
		}

		if (dcpu->dtdsc_clean != NULL) {
			/*
			 * If the clean list is non-NULL, then we're in a
			 * situation where a CPU has done deallocations (we
			 * have a non-NULL dirty list) but no allocations (we
			 * also have a non-NULL clean list).  We can't simply
			 * move the dirty list into the clean list on this
			 * CPU, yet we also don't want to allow this condition
			 * to persist, lest a short clean list prevent a
			 * massive dirty list from being cleaned (which in
			 * turn could lead to otherwise avoidable dynamic
			 * drops).  To deal with this, we look for some CPU
			 * with a NULL clean list, NULL dirty list, and NULL
			 * rinsing list -- and then we borrow this CPU to
			 * rinse our dirty list.
			 */
			for (j = 0; j < NCPU; j++) {
				dtrace_dstate_percpu_t *rinser;

				rinser = &dstate->dtds_percpu[j];

				if (rinser->dtdsc_rinsing != NULL)
					continue;

				if (rinser->dtdsc_dirty != NULL)
					continue;

				if (rinser->dtdsc_clean != NULL)
					continue;

				rinsep = &rinser->dtdsc_rinsing;
				break;
			}

			if (j == NCPU) {
				/*
				 * We were unable to find another CPU that
				 * could accept this dirty list -- we are
				 * therefore unable to clean it now.
				 */
				dtrace_dynvar_failclean++;
				continue;
			}
		}

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			*rinsep = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}
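
	/*
	 * An illustrative view of the dtdh_lock protocol used above (the
	 * values are examples): the lock word doubles as a version.  An even
	 * value -- say 8 -- means unlocked; the CAS from 8 to 9 sets the low
	 * bit to lock the bucket for deallocation, and the increment on
	 * release (9 -> 10) leaves behind a new even version.  Lock-free
	 * lookups snapshot the word before walking the chain and compare it
	 * afterward; any intervening deallocation will have changed it.
	 */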
1819 */ 1820 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1821 hashval = DTRACE_DYNHASH_VALID; 1822 1823 /* 1824 * Yes, it's painful to do a divide here. If the cycle count becomes 1825 * important here, tricks can be pulled to reduce it. (However, it's 1826 * critical that hash collisions be kept to an absolute minimum; 1827 * they're much more painful than a divide.) It's better to have a 1828 * solution that generates few collisions and still keeps things 1829 * relatively simple. 1830 */ 1831 bucket = hashval % dstate->dtds_hashsize; 1832 1833 if (op == DTRACE_DYNVAR_DEALLOC) { 1834 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1835 1836 for (;;) { 1837 while ((lock = *lockp) & 1) 1838 continue; 1839 1840 if (dtrace_casptr((volatile void *)lockp, 1841 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1842 break; 1843 } 1844 1845 dtrace_membar_producer(); 1846 } 1847 1848 top: 1849 prev = NULL; 1850 lock = hash[bucket].dtdh_lock; 1851 1852 dtrace_membar_consumer(); 1853 1854 start = hash[bucket].dtdh_chain; 1855 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1856 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1857 op != DTRACE_DYNVAR_DEALLOC)); 1858 1859 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1860 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1861 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1862 1863 if (dvar->dtdv_hashval != hashval) { 1864 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1865 /* 1866 * We've reached the sink, and therefore the 1867 * end of the hash chain; we can kick out of 1868 * the loop knowing that we have seen a valid 1869 * snapshot of state. 1870 */ 1871 ASSERT(dvar->dtdv_next == NULL); 1872 ASSERT(dvar == &dtrace_dynhash_sink); 1873 break; 1874 } 1875 1876 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1877 /* 1878 * We've gone off the rails: somewhere along 1879 * the line, one of the members of this hash 1880 * chain was deleted. Note that we could also 1881 * detect this by simply letting this loop run 1882 * to completion, as we would eventually hit 1883 * the end of the dirty list. However, we 1884 * want to avoid running the length of the 1885 * dirty list unnecessarily (it might be quite 1886 * long), so we catch this as early as 1887 * possible by detecting the hash marker. In 1888 * this case, we simply set dvar to NULL and 1889 * break; the conditional after the loop will 1890 * send us back to top. 
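 *
 * (To summarize the exits of the loop below, illustratively: it ends
 * with a tuple match (a hit), with dvar == &dtrace_dynhash_sink (a
 * clean miss at the end of the chain), or with dvar == NULL (a
 * concurrent deletion was detected mid-walk, and the traversal is
 * restarted from "top").)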
1891 */ 1892 dvar = NULL; 1893 break; 1894 } 1895 1896 goto next; 1897 } 1898 1899 if (dtuple->dtt_nkeys != nkeys) 1900 goto next; 1901 1902 for (i = 0; i < nkeys; i++, dkey++) { 1903 if (dkey->dttk_size != key[i].dttk_size) 1904 goto next; /* size or type mismatch */ 1905 1906 if (dkey->dttk_size != 0) { 1907 if (dtrace_bcmp( 1908 (void *)(uintptr_t)key[i].dttk_value, 1909 (void *)(uintptr_t)dkey->dttk_value, 1910 dkey->dttk_size)) 1911 goto next; 1912 } else { 1913 if (dkey->dttk_value != key[i].dttk_value) 1914 goto next; 1915 } 1916 } 1917 1918 if (op != DTRACE_DYNVAR_DEALLOC) 1919 return (dvar); 1920 1921 ASSERT(dvar->dtdv_next == NULL || 1922 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1923 1924 if (prev != NULL) { 1925 ASSERT(hash[bucket].dtdh_chain != dvar); 1926 ASSERT(start != dvar); 1927 ASSERT(prev->dtdv_next == dvar); 1928 prev->dtdv_next = dvar->dtdv_next; 1929 } else { 1930 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1931 start, dvar->dtdv_next) != start) { 1932 /* 1933 * We have failed to atomically swing the 1934 * hash table head pointer, presumably because 1935 * of a conflicting allocation on another CPU. 1936 * We need to reread the hash chain and try 1937 * again. 1938 */ 1939 goto top; 1940 } 1941 } 1942 1943 dtrace_membar_producer(); 1944 1945 /* 1946 * Now set the hash value to indicate that it's free. 1947 */ 1948 ASSERT(hash[bucket].dtdh_chain != dvar); 1949 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1950 1951 dtrace_membar_producer(); 1952 1953 /* 1954 * Set the next pointer to point at the dirty list, and 1955 * atomically swing the dirty pointer to the newly freed dvar. 1956 */ 1957 do { 1958 next = dcpu->dtdsc_dirty; 1959 dvar->dtdv_next = next; 1960 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1961 1962 /* 1963 * Finally, unlock this hash bucket. 1964 */ 1965 ASSERT(hash[bucket].dtdh_lock == lock); 1966 ASSERT(lock & 1); 1967 hash[bucket].dtdh_lock++; 1968 1969 return (NULL); 1970 next: 1971 prev = dvar; 1972 continue; 1973 } 1974 1975 if (dvar == NULL) { 1976 /* 1977 * If dvar is NULL, it is because we went off the rails: 1978 * one of the elements that we traversed in the hash chain 1979 * was deleted while we were traversing it. In this case, 1980 * we assert that we aren't doing a dealloc (deallocs lock 1981 * the hash bucket to prevent themselves from racing with 1982 * one another), and retry the hash chain traversal. 1983 */ 1984 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1985 goto top; 1986 } 1987 1988 if (op != DTRACE_DYNVAR_ALLOC) { 1989 /* 1990 * If we are not to allocate a new variable, we want to 1991 * return NULL now. Before we return, check that the value 1992 * of the lock word hasn't changed. If it has, we may have 1993 * seen an inconsistent snapshot. 1994 */ 1995 if (op == DTRACE_DYNVAR_NOALLOC) { 1996 if (hash[bucket].dtdh_lock != lock) 1997 goto top; 1998 } else { 1999 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 2000 ASSERT(hash[bucket].dtdh_lock == lock); 2001 ASSERT(lock & 1); 2002 hash[bucket].dtdh_lock++; 2003 } 2004 2005 return (NULL); 2006 } 2007 2008 /* 2009 * We need to allocate a new dynamic variable. The size we need is the 2010 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 2011 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 2012 * the size of any referred-to data (dsize). We then round the final 2013 * size up to the chunksize for allocation. 
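 *
 * (A worked sizing example, purely illustrative: with nkeys = 2,
 * key[0] by-value (dttk_size == 0) and key[1] a 13-byte by-reference
 * key, the auxiliary key data below is
 *
 *	ksize = P2ROUNDUP(0, 8) + P2ROUNDUP(13, 8) = 16
 *
 * and the allocation fits only if
 *
 *	sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
 *	    ksize + dsize <= chunksize.)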
2014 */ 2015 for (ksize = 0, i = 0; i < nkeys; i++) 2016 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 2017 2018 /* 2019 * This should be pretty much impossible, but could happen if, say, 2020 * strange DIF specified the tuple. Ideally, this should be an 2021 * assertion and not an error condition -- but that requires that the 2022 * chunksize calculation in dtrace_difo_chunksize() be absolutely 2023 * bullet-proof. (That is, it must not be able to be fooled by 2024 * malicious DIF.) Given the lack of backwards branches in DIF, 2025 * solving this would presumably not amount to solving the Halting 2026 * Problem -- but it still seems awfully hard. 2027 */ 2028 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 2029 ksize + dsize > chunksize) { 2030 dcpu->dtdsc_drops++; 2031 return (NULL); 2032 } 2033 2034 nstate = DTRACE_DSTATE_EMPTY; 2035 2036 do { 2037 retry: 2038 free = dcpu->dtdsc_free; 2039 2040 if (free == NULL) { 2041 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 2042 void *rval; 2043 2044 if (clean == NULL) { 2045 /* 2046 * We're out of dynamic variable space on 2047 * this CPU. Unless we have tried all CPUs, 2048 * we'll try to allocate from a different 2049 * CPU. 2050 */ 2051 switch (dstate->dtds_state) { 2052 case DTRACE_DSTATE_CLEAN: { 2053 void *sp = &dstate->dtds_state; 2054 2055 if (++cpu >= NCPU) 2056 cpu = 0; 2057 2058 if (dcpu->dtdsc_dirty != NULL && 2059 nstate == DTRACE_DSTATE_EMPTY) 2060 nstate = DTRACE_DSTATE_DIRTY; 2061 2062 if (dcpu->dtdsc_rinsing != NULL) 2063 nstate = DTRACE_DSTATE_RINSING; 2064 2065 dcpu = &dstate->dtds_percpu[cpu]; 2066 2067 if (cpu != me) 2068 goto retry; 2069 2070 (void) dtrace_cas32(sp, 2071 DTRACE_DSTATE_CLEAN, nstate); 2072 2073 /* 2074 * To increment the correct bean 2075 * counter, take another lap. 2076 */ 2077 goto retry; 2078 } 2079 2080 case DTRACE_DSTATE_DIRTY: 2081 dcpu->dtdsc_dirty_drops++; 2082 break; 2083 2084 case DTRACE_DSTATE_RINSING: 2085 dcpu->dtdsc_rinsing_drops++; 2086 break; 2087 2088 case DTRACE_DSTATE_EMPTY: 2089 dcpu->dtdsc_drops++; 2090 break; 2091 } 2092 2093 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2094 return (NULL); 2095 } 2096 2097 /* 2098 * The clean list appears to be non-empty. We want to 2099 * move the clean list to the free list; we start by 2100 * moving the clean pointer aside. 2101 */ 2102 if (dtrace_casptr(&dcpu->dtdsc_clean, 2103 clean, NULL) != clean) { 2104 /* 2105 * We are in one of two situations: 2106 * 2107 * (a) The clean list was switched to the 2108 * free list by another CPU. 2109 * 2110 * (b) The clean list was added to by the 2111 * cleansing cyclic. 2112 * 2113 * In either of these situations, we can 2114 * just reattempt the free list allocation. 2115 */ 2116 goto retry; 2117 } 2118 2119 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2120 2121 /* 2122 * Now we'll move the clean list to our free list. 2123 * It's impossible for this to fail: the only way 2124 * the free list can be updated is through this 2125 * code path, and only one CPU can own the clean list. 2126 * Thus, it would only be possible for this to fail if 2127 * this code were racing with dtrace_dynvar_clean(). 2128 * (That is, if dtrace_dynvar_clean() updated the clean 2129 * list, and we ended up racing to update the free 2130 * list.) This race is prevented by the dtrace_sync() 2131 * in dtrace_dynvar_clean() -- which flushes the 2132 * owners of the clean lists out before resetting 2133 * the clean lists. 
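 *
 * (Illustrative summary of the chunk lifecycle implied here and in
 * dtrace_dynvar_clean():
 *
 *	free -> hash chain -> dirty -> rinsing -> clean -> free
 *
 * where the dirty-to-rinsing and rinsing-to-clean transitions are
 * performed by the cleaner after a dtrace_sync(), and the
 * clean-to-free transition is performed below by the CPU that owns
 * the clean list.)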
2134 */ 2135 dcpu = &dstate->dtds_percpu[me]; 2136 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2137 ASSERT(rval == NULL); 2138 goto retry; 2139 } 2140 2141 dvar = free; 2142 new_free = dvar->dtdv_next; 2143 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2144 2145 /* 2146 * We have now allocated a new chunk. We copy the tuple keys into the 2147 * tuple array and copy any referenced key data into the data space 2148 * following the tuple array. As we do this, we relocate dttk_value 2149 * in the final tuple to point to the key data address in the chunk. 2150 */ 2151 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2152 dvar->dtdv_data = (void *)(kdata + ksize); 2153 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2154 2155 for (i = 0; i < nkeys; i++) { 2156 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2157 size_t kesize = key[i].dttk_size; 2158 2159 if (kesize != 0) { 2160 dtrace_bcopy( 2161 (const void *)(uintptr_t)key[i].dttk_value, 2162 (void *)kdata, kesize); 2163 dkey->dttk_value = kdata; 2164 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2165 } else { 2166 dkey->dttk_value = key[i].dttk_value; 2167 } 2168 2169 dkey->dttk_size = kesize; 2170 } 2171 2172 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2173 dvar->dtdv_hashval = hashval; 2174 dvar->dtdv_next = start; 2175 2176 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2177 return (dvar); 2178 2179 /* 2180 * The cas has failed. Either another CPU is adding an element to 2181 * this hash chain, or another CPU is deleting an element from this 2182 * hash chain. The simplest way to deal with both of these cases 2183 * (though not necessarily the most efficient) is to free our 2184 * allocated block and re-attempt it all. Note that the free is 2185 * to the dirty list and _not_ to the free list. This is to prevent 2186 * races with allocators, above. 
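 *
 * (One such race, spelled out illustratively: pushing the chunk
 * straight onto the free list would invite the classic ABA problem --
 * an allocator that has already read dtdsc_free and the old head's
 * next pointer could succeed its cas against a re-pushed head and
 * splice a now-live chunk out as if it were still free. Deferring the
 * chunk to the dirty list postpones its reuse until after the next
 * cleanse, by which time no such stale reader can exist.)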
2187 */ 2188 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2189 2190 dtrace_membar_producer(); 2191 2192 do { 2193 free = dcpu->dtdsc_dirty; 2194 dvar->dtdv_next = free; 2195 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2196 2197 goto top; 2198 } 2199 2200 /*ARGSUSED*/ 2201 static void 2202 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2203 { 2204 if ((int64_t)nval < (int64_t)*oval) 2205 *oval = nval; 2206 } 2207 2208 /*ARGSUSED*/ 2209 static void 2210 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2211 { 2212 if ((int64_t)nval > (int64_t)*oval) 2213 *oval = nval; 2214 } 2215 2216 static void 2217 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2218 { 2219 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2220 int64_t val = (int64_t)nval; 2221 2222 if (val < 0) { 2223 for (i = 0; i < zero; i++) { 2224 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2225 quanta[i] += incr; 2226 return; 2227 } 2228 } 2229 } else { 2230 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2231 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2232 quanta[i - 1] += incr; 2233 return; 2234 } 2235 } 2236 2237 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2238 return; 2239 } 2240 2241 ASSERT(0); 2242 } 2243 2244 static void 2245 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2246 { 2247 uint64_t arg = *lquanta++; 2248 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2249 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2250 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2251 int32_t val = (int32_t)nval, level; 2252 2253 ASSERT(step != 0); 2254 ASSERT(levels != 0); 2255 2256 if (val < base) { 2257 /* 2258 * This is an underflow. 2259 */ 2260 lquanta[0] += incr; 2261 return; 2262 } 2263 2264 level = (val - base) / step; 2265 2266 if (level < levels) { 2267 lquanta[level + 1] += incr; 2268 return; 2269 } 2270 2271 /* 2272 * This is an overflow. 2273 */ 2274 lquanta[levels + 1] += incr; 2275 } 2276 2277 static int 2278 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2279 uint16_t high, uint16_t nsteps, int64_t value) 2280 { 2281 int64_t this = 1, last, next; 2282 int base = 1, order; 2283 2284 ASSERT(factor <= nsteps); 2285 ASSERT(nsteps % factor == 0); 2286 2287 for (order = 0; order < low; order++) 2288 this *= factor; 2289 2290 /* 2291 * If our value is less than our factor taken to the power of the 2292 * low order of magnitude, it goes into the zeroth bucket. 2293 */ 2294 if (value < (last = this)) 2295 return (0); 2296 2297 for (this *= factor; order <= high; order++) { 2298 int nbuckets = this > nsteps ? nsteps : this; 2299 2300 if ((next = this * factor) < this) { 2301 /* 2302 * We should not generally get log/linear quantizations 2303 * with a high magnitude that allows 64-bits to 2304 * overflow, but we nonetheless protect against this 2305 * by explicitly checking for overflow, and clamping 2306 * our value accordingly. 2307 */ 2308 value = this - 1; 2309 } 2310 2311 if (value < this) { 2312 /* 2313 * If our value lies within this order of magnitude, 2314 * determine its position by taking the offset within 2315 * the order of magnitude, dividing by the bucket 2316 * width, and adding to our (accumulated) base. 
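 *
 * (A worked example, illustrative only: with factor = 10, low = 0,
 * high = 2 and nsteps = 10, bucket 0 holds values below 10^0, and each
 * subsequent order of magnitude contributes ten buckets, so values 10
 * through 99 land in buckets 10 through 19 in steps of 10. For
 * value = 42 the loop reaches the order with last = 10, this = 100 and
 * base = 10, and returns
 *
 *	base + (value - last) / (this / nbuckets) = 10 + 32 / 10 = 13,
 *
 * the bucket covering [40, 50).)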
2317 */ 2318 return (base + (value - last) / (this / nbuckets)); 2319 } 2320 2321 base += nbuckets - (nbuckets / factor); 2322 last = this; 2323 this = next; 2324 } 2325 2326 /* 2327 * Our value is greater than or equal to our factor taken to the 2328 * power of one plus the high magnitude -- return the top bucket. 2329 */ 2330 return (base); 2331 } 2332 2333 static void 2334 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2335 { 2336 uint64_t arg = *llquanta++; 2337 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2338 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2339 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2340 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2341 2342 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2343 low, high, nsteps, nval)] += incr; 2344 } 2345 2346 /*ARGSUSED*/ 2347 static void 2348 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2349 { 2350 data[0]++; 2351 data[1] += nval; 2352 } 2353 2354 /*ARGSUSED*/ 2355 static void 2356 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2357 { 2358 int64_t snval = (int64_t)nval; 2359 uint64_t tmp[2]; 2360 2361 data[0]++; 2362 data[1] += nval; 2363 2364 /* 2365 * What we want to say here is: 2366 * 2367 * data[2] += nval * nval; 2368 * 2369 * But given that nval is 64-bit, we could easily overflow, so 2370 * we do this as 128-bit arithmetic. 2371 */ 2372 if (snval < 0) 2373 snval = -snval; 2374 2375 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2376 dtrace_add_128(data + 2, tmp, data + 2); 2377 } 2378 2379 /*ARGSUSED*/ 2380 static void 2381 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2382 { 2383 *oval = *oval + 1; 2384 } 2385 2386 /*ARGSUSED*/ 2387 static void 2388 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2389 { 2390 *oval += nval; 2391 } 2392 2393 /* 2394 * Aggregate given the tuple in the principal data buffer, and the aggregating 2395 * action denoted by the specified dtrace_aggregation_t. The aggregation 2396 * buffer is specified as the buf parameter. This routine does not return 2397 * failure; if there is no space in the aggregation buffer, the data will be 2398 * dropped, and a corresponding counter incremented. 2399 */ 2400 static void 2401 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2402 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2403 { 2404 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2405 uint32_t i, ndx, size, fsize; 2406 uint32_t align = sizeof (uint64_t) - 1; 2407 dtrace_aggbuffer_t *agb; 2408 dtrace_aggkey_t *key; 2409 uint32_t hashval = 0, limit, isstr; 2410 caddr_t tomax, data, kdata; 2411 dtrace_actkind_t action; 2412 dtrace_action_t *act; 2413 uintptr_t offs; 2414 2415 if (buf == NULL) 2416 return; 2417 2418 if (!agg->dtag_hasarg) { 2419 /* 2420 * Currently, only quantize() and lquantize() take additional 2421 * arguments, and they have the same semantics: an increment 2422 * value that defaults to 1 when not present. If additional 2423 * aggregating actions take arguments, the setting of the 2424 * default argument value will presumably have to become more 2425 * sophisticated... 
2426 */ 2427 arg = 1; 2428 } 2429 2430 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2431 size = rec->dtrd_offset - agg->dtag_base; 2432 fsize = size + rec->dtrd_size; 2433 2434 ASSERT(dbuf->dtb_tomax != NULL); 2435 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2436 2437 if ((tomax = buf->dtb_tomax) == NULL) { 2438 dtrace_buffer_drop(buf); 2439 return; 2440 } 2441 2442 /* 2443 * The metastructure is always at the bottom of the buffer. 2444 */ 2445 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2446 sizeof (dtrace_aggbuffer_t)); 2447 2448 if (buf->dtb_offset == 0) { 2449 /* 2450 * We just kludge up approximately 1/8th of the size to be 2451 * buckets. If this guess ends up being routinely 2452 * off-the-mark, we may need to dynamically readjust this 2453 * based on past performance. 2454 */ 2455 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2456 2457 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2458 (uintptr_t)tomax || hashsize == 0) { 2459 /* 2460 * We've been given a ludicrously small buffer; 2461 * increment our drop count and leave. 2462 */ 2463 dtrace_buffer_drop(buf); 2464 return; 2465 } 2466 2467 /* 2468 * And now, a pathetic attempt to try to get an odd (or 2469 * perchance, a prime) hash size for better hash distribution. 2470 */ 2471 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2472 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2473 2474 agb->dtagb_hashsize = hashsize; 2475 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2476 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2477 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2478 2479 for (i = 0; i < agb->dtagb_hashsize; i++) 2480 agb->dtagb_hash[i] = NULL; 2481 } 2482 2483 ASSERT(agg->dtag_first != NULL); 2484 ASSERT(agg->dtag_first->dta_intuple); 2485 2486 /* 2487 * Calculate the hash value based on the key. Note that we _don't_ 2488 * include the aggid in the hashing (but we will store it as part of 2489 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2490 * algorithm: a simple, quick algorithm that has no known funnels, and 2491 * gets good distribution in practice. The efficacy of the hashing 2492 * algorithm (and a comparison with other algorithms) may be found by 2493 * running the ::dtrace_aggstat MDB dcmd. 2494 */ 2495 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2496 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2497 limit = i + act->dta_rec.dtrd_size; 2498 ASSERT(limit <= size); 2499 isstr = DTRACEACT_ISSTRING(act); 2500 2501 for (; i < limit; i++) { 2502 hashval += data[i]; 2503 hashval += (hashval << 10); 2504 hashval ^= (hashval >> 6); 2505 2506 if (isstr && data[i] == '\0') 2507 break; 2508 } 2509 } 2510 2511 hashval += (hashval << 3); 2512 hashval ^= (hashval >> 11); 2513 hashval += (hashval << 15); 2514 2515 /* 2516 * Yes, the divide here is expensive -- but it's generally the least 2517 * of the performance issues given the amount of data that we iterate 2518 * over to compute hash values, compare data, etc.
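 *
 * (One might be tempted to force dtagb_hashsize to a power of two and
 * mask rather than divide, but the slew adjustment above deliberately
 * steers the hash size away from round numbers to improve
 * distribution; the divide is the price of that choice.)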
2519 */ 2520 ndx = hashval % agb->dtagb_hashsize; 2521 2522 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2523 ASSERT((caddr_t)key >= tomax); 2524 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2525 2526 if (hashval != key->dtak_hashval || key->dtak_size != size) 2527 continue; 2528 2529 kdata = key->dtak_data; 2530 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2531 2532 for (act = agg->dtag_first; act->dta_intuple; 2533 act = act->dta_next) { 2534 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2535 limit = i + act->dta_rec.dtrd_size; 2536 ASSERT(limit <= size); 2537 isstr = DTRACEACT_ISSTRING(act); 2538 2539 for (; i < limit; i++) { 2540 if (kdata[i] != data[i]) 2541 goto next; 2542 2543 if (isstr && data[i] == '\0') 2544 break; 2545 } 2546 } 2547 2548 if (action != key->dtak_action) { 2549 /* 2550 * We are aggregating on the same value in the same 2551 * aggregation with two different aggregating actions. 2552 * (This should have been picked up in the compiler, 2553 * so we may be dealing with errant or devious DIF.) 2554 * This is an error condition; we indicate as much, 2555 * and return. 2556 */ 2557 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2558 return; 2559 } 2560 2561 /* 2562 * This is a hit: we need to apply the aggregator to 2563 * the value at this key. 2564 */ 2565 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2566 return; 2567 next: 2568 continue; 2569 } 2570 2571 /* 2572 * We didn't find it. We need to allocate some zero-filled space, 2573 * link it into the hash table appropriately, and apply the aggregator 2574 * to the (zero-filled) value. 2575 */ 2576 offs = buf->dtb_offset; 2577 while (offs & align) 2578 offs += sizeof (uint32_t); 2579 2580 /* 2581 * If we don't have enough room to both allocate a new key _and_ 2582 * its associated data, increment the drop count and return. 2583 */ 2584 if ((uintptr_t)tomax + offs + fsize > 2585 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2586 dtrace_buffer_drop(buf); 2587 return; 2588 } 2589 2590 /*CONSTCOND*/ 2591 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2592 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2593 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2594 2595 key->dtak_data = kdata = tomax + offs; 2596 buf->dtb_offset = offs + fsize; 2597 2598 /* 2599 * Now copy the data across. 2600 */ 2601 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2602 2603 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2604 kdata[i] = data[i]; 2605 2606 /* 2607 * Because strings are not zeroed out by default, we need to iterate 2608 * looking for actions that store strings, and we need to explicitly 2609 * pad these strings out with zeroes. 2610 */ 2611 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2612 int nul; 2613 2614 if (!DTRACEACT_ISSTRING(act)) 2615 continue; 2616 2617 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2618 limit = i + act->dta_rec.dtrd_size; 2619 ASSERT(limit <= size); 2620 2621 for (nul = 0; i < limit; i++) { 2622 if (nul) { 2623 kdata[i] = '\0'; 2624 continue; 2625 } 2626 2627 if (data[i] != '\0') 2628 continue; 2629 2630 nul = 1; 2631 } 2632 } 2633 2634 for (i = size; i < fsize; i++) 2635 kdata[i] = 0; 2636 2637 key->dtak_hashval = hashval; 2638 key->dtak_size = size; 2639 key->dtak_action = action; 2640 key->dtak_next = agb->dtagb_hash[ndx]; 2641 agb->dtagb_hash[ndx] = key; 2642 2643 /* 2644 * Finally, apply the aggregator.
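 *
 * (For instance -- illustrative of the mechanism, with the initial
 * values set at aggregation-creation time elsewhere in this file -- a
 * count() aggregation starts its value at 0 and its dtag_aggregate is
 * dtrace_aggregate_count() above, so a fresh key's value becomes 1; a
 * min() aggregation starts at INT64_MAX so that the first sample
 * always wins the signed comparison in dtrace_aggregate_min().)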
2645 */ 2646 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2647 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2648 } 2649 2650 /* 2651 * Given consumer state, this routine finds a speculation in the INACTIVE 2652 * state and transitions it into the ACTIVE state. If there is no speculation 2653 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2654 * incremented -- it is up to the caller to take appropriate action. 2655 */ 2656 static int 2657 dtrace_speculation(dtrace_state_t *state) 2658 { 2659 int i = 0; 2660 dtrace_speculation_state_t current; 2661 uint32_t *stat = &state->dts_speculations_unavail, count; 2662 2663 while (i < state->dts_nspeculations) { 2664 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2665 2666 current = spec->dtsp_state; 2667 2668 if (current != DTRACESPEC_INACTIVE) { 2669 if (current == DTRACESPEC_COMMITTINGMANY || 2670 current == DTRACESPEC_COMMITTING || 2671 current == DTRACESPEC_DISCARDING) 2672 stat = &state->dts_speculations_busy; 2673 i++; 2674 continue; 2675 } 2676 2677 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2678 current, DTRACESPEC_ACTIVE) == current) 2679 return (i + 1); 2680 } 2681 2682 /* 2683 * We couldn't find a speculation. If we found as much as a single 2684 * busy speculation buffer, we'll attribute this failure as "busy" 2685 * instead of "unavail". 2686 */ 2687 do { 2688 count = *stat; 2689 } while (dtrace_cas32(stat, count, count + 1) != count); 2690 2691 return (0); 2692 } 2693 2694 /* 2695 * This routine commits an active speculation. If the specified speculation 2696 * is not in a valid state to perform a commit(), this routine will silently do 2697 * nothing. The state of the specified speculation is transitioned according 2698 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2699 */ 2700 static void 2701 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2702 dtrace_specid_t which) 2703 { 2704 dtrace_speculation_t *spec; 2705 dtrace_buffer_t *src, *dest; 2706 uintptr_t daddr, saddr, dlimit, slimit; 2707 dtrace_speculation_state_t current, new = 0; 2708 intptr_t offs; 2709 uint64_t timestamp; 2710 2711 if (which == 0) 2712 return; 2713 2714 if (which > state->dts_nspeculations) { 2715 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2716 return; 2717 } 2718 2719 spec = &state->dts_speculations[which - 1]; 2720 src = &spec->dtsp_buffer[cpu]; 2721 dest = &state->dts_buffer[cpu]; 2722 2723 do { 2724 current = spec->dtsp_state; 2725 2726 if (current == DTRACESPEC_COMMITTINGMANY) 2727 break; 2728 2729 switch (current) { 2730 case DTRACESPEC_INACTIVE: 2731 case DTRACESPEC_DISCARDING: 2732 return; 2733 2734 case DTRACESPEC_COMMITTING: 2735 /* 2736 * This is only possible if we are (a) commit()'ing 2737 * without having done a prior speculate() on this CPU 2738 * and (b) racing with another commit() on a different 2739 * CPU. There's nothing to do -- we just assert that 2740 * our offset is 0. 2741 */ 2742 ASSERT(src->dtb_offset == 0); 2743 return; 2744 2745 case DTRACESPEC_ACTIVE: 2746 new = DTRACESPEC_COMMITTING; 2747 break; 2748 2749 case DTRACESPEC_ACTIVEONE: 2750 /* 2751 * This speculation is active on one CPU. If our 2752 * buffer offset is non-zero, we know that the one CPU 2753 * must be us. Otherwise, we are committing on a 2754 * different CPU from the speculate(), and we must 2755 * rely on being asynchronously cleaned. 
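 *
 * (Sketch of the commit-side transitions effected by the cas loop
 * below:
 *
 *	ACTIVE                    -> COMMITTING
 *	ACTIVEONE (offset != 0)   -> COMMITTING      (we are the one CPU)
 *	ACTIVEONE (offset == 0)   -> COMMITTINGMANY
 *	ACTIVEMANY                -> COMMITTINGMANY
 *
 * COMMITTING is unwound to INACTIVE at the bottom of this function;
 * COMMITTINGMANY must instead wait for the asynchronous cleaner.)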
2756 */ 2757 if (src->dtb_offset != 0) { 2758 new = DTRACESPEC_COMMITTING; 2759 break; 2760 } 2761 /*FALLTHROUGH*/ 2762 2763 case DTRACESPEC_ACTIVEMANY: 2764 new = DTRACESPEC_COMMITTINGMANY; 2765 break; 2766 2767 default: 2768 ASSERT(0); 2769 } 2770 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2771 current, new) != current); 2772 2773 /* 2774 * We have set the state to indicate that we are committing this 2775 * speculation. Now reserve the necessary space in the destination 2776 * buffer. 2777 */ 2778 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2779 sizeof (uint64_t), state, NULL)) < 0) { 2780 dtrace_buffer_drop(dest); 2781 goto out; 2782 } 2783 2784 /* 2785 * We have sufficient space to copy the speculative buffer into the 2786 * primary buffer. First, modify the speculative buffer, filling 2787 * in the timestamp of all entries with the current time. The data 2788 * must have the commit() time rather than the time it was traced, 2789 * so that all entries in the primary buffer are in timestamp order. 2790 */ 2791 timestamp = dtrace_gethrtime(); 2792 saddr = (uintptr_t)src->dtb_tomax; 2793 slimit = saddr + src->dtb_offset; 2794 while (saddr < slimit) { 2795 size_t size; 2796 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2797 2798 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2799 saddr += sizeof (dtrace_epid_t); 2800 continue; 2801 } 2802 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2803 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2804 2805 ASSERT3U(saddr + size, <=, slimit); 2806 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2807 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2808 2809 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2810 2811 saddr += size; 2812 } 2813 2814 /* 2815 * Copy the buffer across. (Note that this is a 2816 * highly suboptimal bcopy(); in the unlikely event that this becomes 2817 * a serious performance issue, a high-performance DTrace-specific 2818 * bcopy() should obviously be invented.) 2819 */ 2820 daddr = (uintptr_t)dest->dtb_tomax + offs; 2821 dlimit = daddr + src->dtb_offset; 2822 saddr = (uintptr_t)src->dtb_tomax; 2823 2824 /* 2825 * First, the aligned portion. 2826 */ 2827 while (dlimit - daddr >= sizeof (uint64_t)) { 2828 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2829 2830 daddr += sizeof (uint64_t); 2831 saddr += sizeof (uint64_t); 2832 } 2833 2834 /* 2835 * Now any left-over bit... 2836 */ 2837 while (dlimit - daddr) 2838 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2839 2840 /* 2841 * Finally, commit the reserved space in the destination buffer. 2842 */ 2843 dest->dtb_offset = offs + src->dtb_offset; 2844 2845 out: 2846 /* 2847 * If we're lucky enough to be the only active CPU on this speculation 2848 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2849 */ 2850 if (current == DTRACESPEC_ACTIVE || 2851 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2852 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2853 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2854 2855 ASSERT(rval == DTRACESPEC_COMMITTING); 2856 } 2857 2858 src->dtb_offset = 0; 2859 src->dtb_xamot_drops += src->dtb_drops; 2860 src->dtb_drops = 0; 2861 } 2862 2863 /* 2864 * This routine discards an active speculation. If the specified speculation 2865 * is not in a valid state to perform a discard(), this routine will silently 2866 * do nothing.
The state of the specified speculation is transitioned 2867 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2868 */ 2869 static void 2870 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2871 dtrace_specid_t which) 2872 { 2873 dtrace_speculation_t *spec; 2874 dtrace_speculation_state_t current, new = 0; 2875 dtrace_buffer_t *buf; 2876 2877 if (which == 0) 2878 return; 2879 2880 if (which > state->dts_nspeculations) { 2881 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2882 return; 2883 } 2884 2885 spec = &state->dts_speculations[which - 1]; 2886 buf = &spec->dtsp_buffer[cpu]; 2887 2888 do { 2889 current = spec->dtsp_state; 2890 2891 switch (current) { 2892 case DTRACESPEC_INACTIVE: 2893 case DTRACESPEC_COMMITTINGMANY: 2894 case DTRACESPEC_COMMITTING: 2895 case DTRACESPEC_DISCARDING: 2896 return; 2897 2898 case DTRACESPEC_ACTIVE: 2899 case DTRACESPEC_ACTIVEMANY: 2900 new = DTRACESPEC_DISCARDING; 2901 break; 2902 2903 case DTRACESPEC_ACTIVEONE: 2904 if (buf->dtb_offset != 0) { 2905 new = DTRACESPEC_INACTIVE; 2906 } else { 2907 new = DTRACESPEC_DISCARDING; 2908 } 2909 break; 2910 2911 default: 2912 ASSERT(0); 2913 } 2914 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2915 current, new) != current); 2916 2917 buf->dtb_offset = 0; 2918 buf->dtb_drops = 0; 2919 } 2920 2921 /* 2922 * Note: not called from probe context. This function is called 2923 * asynchronously from cross call context to clean any speculations that are 2924 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2925 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2926 * speculation. 2927 */ 2928 static void 2929 dtrace_speculation_clean_here(dtrace_state_t *state) 2930 { 2931 dtrace_icookie_t cookie; 2932 processorid_t cpu = curcpu; 2933 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2934 dtrace_specid_t i; 2935 2936 cookie = dtrace_interrupt_disable(); 2937 2938 if (dest->dtb_tomax == NULL) { 2939 dtrace_interrupt_enable(cookie); 2940 return; 2941 } 2942 2943 for (i = 0; i < state->dts_nspeculations; i++) { 2944 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2945 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2946 2947 if (src->dtb_tomax == NULL) 2948 continue; 2949 2950 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2951 src->dtb_offset = 0; 2952 continue; 2953 } 2954 2955 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2956 continue; 2957 2958 if (src->dtb_offset == 0) 2959 continue; 2960 2961 dtrace_speculation_commit(state, cpu, i + 1); 2962 } 2963 2964 dtrace_interrupt_enable(cookie); 2965 } 2966 2967 /* 2968 * Note: not called from probe context. This function is called 2969 * asynchronously (and at a regular interval) to clean any speculations that 2970 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2971 * is work to be done, it cross calls all CPUs to perform that work; 2972 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to 2973 * the INACTIVE state until they have been cleaned by all CPUs.
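 *
 * (The protocol below is thus two-phase, illustratively: first each
 * candidate speculation is marked with dtsp_cleaning and every CPU is
 * cross called to commit or discard its per-CPU buffer; only then are
 * the marked speculations cas'd from COMMITTINGMANY or DISCARDING back
 * to INACTIVE.)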
2974 */ 2975 static void 2976 dtrace_speculation_clean(dtrace_state_t *state) 2977 { 2978 int work = 0, rv; 2979 dtrace_specid_t i; 2980 2981 for (i = 0; i < state->dts_nspeculations; i++) { 2982 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2983 2984 ASSERT(!spec->dtsp_cleaning); 2985 2986 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2987 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2988 continue; 2989 2990 work++; 2991 spec->dtsp_cleaning = 1; 2992 } 2993 2994 if (!work) 2995 return; 2996 2997 dtrace_xcall(DTRACE_CPUALL, 2998 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2999 3000 /* 3001 * We now know that all CPUs have committed or discarded their 3002 * speculation buffers, as appropriate. We can now set the state 3003 * to inactive. 3004 */ 3005 for (i = 0; i < state->dts_nspeculations; i++) { 3006 dtrace_speculation_t *spec = &state->dts_speculations[i]; 3007 dtrace_speculation_state_t current, new; 3008 3009 if (!spec->dtsp_cleaning) 3010 continue; 3011 3012 current = spec->dtsp_state; 3013 ASSERT(current == DTRACESPEC_DISCARDING || 3014 current == DTRACESPEC_COMMITTINGMANY); 3015 3016 new = DTRACESPEC_INACTIVE; 3017 3018 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 3019 ASSERT(rv == current); 3020 spec->dtsp_cleaning = 0; 3021 } 3022 } 3023 3024 /* 3025 * Called as part of a speculate() to get the speculative buffer associated 3026 * with a given speculation. Returns NULL if the specified speculation is not 3027 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 3028 * the active CPU is not the specified CPU -- the speculation will be 3029 * atomically transitioned into the ACTIVEMANY state. 3030 */ 3031 static dtrace_buffer_t * 3032 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 3033 dtrace_specid_t which) 3034 { 3035 dtrace_speculation_t *spec; 3036 dtrace_speculation_state_t current, new = 0; 3037 dtrace_buffer_t *buf; 3038 3039 if (which == 0) 3040 return (NULL); 3041 3042 if (which > state->dts_nspeculations) { 3043 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3044 return (NULL); 3045 } 3046 3047 spec = &state->dts_speculations[which - 1]; 3048 buf = &spec->dtsp_buffer[cpuid]; 3049 3050 do { 3051 current = spec->dtsp_state; 3052 3053 switch (current) { 3054 case DTRACESPEC_INACTIVE: 3055 case DTRACESPEC_COMMITTINGMANY: 3056 case DTRACESPEC_DISCARDING: 3057 return (NULL); 3058 3059 case DTRACESPEC_COMMITTING: 3060 ASSERT(buf->dtb_offset == 0); 3061 return (NULL); 3062 3063 case DTRACESPEC_ACTIVEONE: 3064 /* 3065 * This speculation is currently active on one CPU. 3066 * Check the offset in the buffer; if it's non-zero, 3067 * that CPU must be us (and we leave the state alone). 3068 * If it's zero, assume that we're starting on a new 3069 * CPU -- and change the state to indicate that the 3070 * speculation is active on more than one CPU. 3071 */ 3072 if (buf->dtb_offset != 0) 3073 return (buf); 3074 3075 new = DTRACESPEC_ACTIVEMANY; 3076 break; 3077 3078 case DTRACESPEC_ACTIVEMANY: 3079 return (buf); 3080 3081 case DTRACESPEC_ACTIVE: 3082 new = DTRACESPEC_ACTIVEONE; 3083 break; 3084 3085 default: 3086 ASSERT(0); 3087 } 3088 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3089 current, new) != current); 3090 3091 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3092 return (buf); 3093 } 3094 3095 /* 3096 * Return a string. 
In the event that the user lacks the privilege to access 3097 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3098 * don't fail access checking. 3099 * 3100 * dtrace_dif_variable() uses this routine as a helper for various 3101 * builtin values such as 'execname' and 'probefunc.' 3102 */ 3103 uintptr_t 3104 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 3105 dtrace_mstate_t *mstate) 3106 { 3107 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3108 uintptr_t ret; 3109 size_t strsz; 3110 3111 /* 3112 * The easy case: this probe is allowed to read all of memory, so 3113 * we can just return this as a vanilla pointer. 3114 */ 3115 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 3116 return (addr); 3117 3118 /* 3119 * This is the tougher case: we copy the string in question from 3120 * kernel memory into scratch memory and return it that way: this 3121 * ensures that we won't trip up when access checking tests the 3122 * BYREF return value. 3123 */ 3124 strsz = dtrace_strlen((char *)addr, size) + 1; 3125 3126 if (mstate->dtms_scratch_ptr + strsz > 3127 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3128 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3129 return (0); 3130 } 3131 3132 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3133 strsz); 3134 ret = mstate->dtms_scratch_ptr; 3135 mstate->dtms_scratch_ptr += strsz; 3136 return (ret); 3137 } 3138 3139 /* 3140 * Return a string from a memory address which is known to have one or 3141 * more concatenated, individually zero-terminated, sub-strings. 3142 * In the event that the user lacks the privilege to access 3143 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3144 * don't fail access checking. 3145 * 3146 * dtrace_dif_variable() uses this routine as a helper for various 3147 * builtin values such as 'execargs'. 3148 */ 3149 static uintptr_t 3150 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 3151 dtrace_mstate_t *mstate) 3152 { 3153 char *p; 3154 size_t i; 3155 uintptr_t ret; 3156 3157 if (mstate->dtms_scratch_ptr + strsz > 3158 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3159 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3160 return (0); 3161 } 3162 3163 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3164 strsz); 3165 3166 /* Replace sub-string termination characters with a space. */ 3167 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 3168 p++, i++) 3169 if (*p == '\0') 3170 *p = ' '; 3171 3172 ret = mstate->dtms_scratch_ptr; 3173 mstate->dtms_scratch_ptr += strsz; 3174 return (ret); 3175 } 3176 3177 /* 3178 * This function implements the DIF emulator's variable lookups. The emulator 3179 * passes a reserved variable identifier and optional built-in array index. 3180 */ 3181 static uint64_t 3182 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 3183 uint64_t ndx) 3184 { 3185 /* 3186 * If we're accessing one of the uncached arguments, we'll turn this 3187 * into a reference in the args array.
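 *
 * (For example: a reference to arg3 arrives as v == DIF_VAR_ARG0 + 3
 * and is rewritten below to v = DIF_VAR_ARGS with ndx = 3, to be
 * served from mstate->dtms_arg[3] -- or, beyond the cached entries,
 * from the provider's dtps_getargval() entry point or
 * dtrace_getarg().)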
3188 */ 3189 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 3190 ndx = v - DIF_VAR_ARG0; 3191 v = DIF_VAR_ARGS; 3192 } 3193 3194 switch (v) { 3195 case DIF_VAR_ARGS: 3196 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 3197 if (ndx >= sizeof (mstate->dtms_arg) / 3198 sizeof (mstate->dtms_arg[0])) { 3199 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3200 dtrace_provider_t *pv; 3201 uint64_t val; 3202 3203 pv = mstate->dtms_probe->dtpr_provider; 3204 if (pv->dtpv_pops.dtps_getargval != NULL) 3205 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 3206 mstate->dtms_probe->dtpr_id, 3207 mstate->dtms_probe->dtpr_arg, ndx, aframes); 3208 else 3209 val = dtrace_getarg(ndx, aframes); 3210 3211 /* 3212 * This is regrettably required to keep the compiler 3213 * from tail-optimizing the call to dtrace_getarg(). 3214 * The condition always evaluates to true, but the 3215 * compiler has no way of figuring that out a priori. 3216 * (None of this would be necessary if the compiler 3217 * could be relied upon to _always_ tail-optimize 3218 * the call to dtrace_getarg() -- but it can't.) 3219 */ 3220 if (mstate->dtms_probe != NULL) 3221 return (val); 3222 3223 ASSERT(0); 3224 } 3225 3226 return (mstate->dtms_arg[ndx]); 3227 3228 #ifdef illumos 3229 case DIF_VAR_UREGS: { 3230 klwp_t *lwp; 3231 3232 if (!dtrace_priv_proc(state)) 3233 return (0); 3234 3235 if ((lwp = curthread->t_lwp) == NULL) { 3236 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3237 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3238 return (0); 3239 } 3240 3241 return (dtrace_getreg(lwp->lwp_regs, ndx)); 3243 } 3244 #else 3245 case DIF_VAR_UREGS: { 3246 struct trapframe *tframe; 3247 3248 if (!dtrace_priv_proc(state)) 3249 return (0); 3250 3251 if ((tframe = curthread->td_frame) == NULL) { 3252 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3253 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3254 return (0); 3255 } 3256 3257 return (dtrace_getreg(tframe, ndx)); 3258 } 3259 #endif 3260 3261 case DIF_VAR_CURTHREAD: 3262 if (!dtrace_priv_proc(state)) 3263 return (0); 3264 return ((uint64_t)(uintptr_t)curthread); 3265 3266 case DIF_VAR_TIMESTAMP: 3267 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3268 mstate->dtms_timestamp = dtrace_gethrtime(); 3269 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3270 } 3271 return (mstate->dtms_timestamp); 3272 3273 case DIF_VAR_VTIMESTAMP: 3274 ASSERT(dtrace_vtime_references != 0); 3275 return (curthread->t_dtrace_vtime); 3276 3277 case DIF_VAR_WALLTIMESTAMP: 3278 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3279 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3280 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3281 } 3282 return (mstate->dtms_walltimestamp); 3283 3284 #ifdef illumos 3285 case DIF_VAR_IPL: 3286 if (!dtrace_priv_kernel(state)) 3287 return (0); 3288 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3289 mstate->dtms_ipl = dtrace_getipl(); 3290 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3291 } 3292 return (mstate->dtms_ipl); 3293 #endif 3294 3295 case DIF_VAR_EPID: 3296 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3297 return (mstate->dtms_epid); 3298 3299 case DIF_VAR_ID: 3300 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3301 return (mstate->dtms_probe->dtpr_id); 3302 3303 case DIF_VAR_STACKDEPTH: 3304 if (!dtrace_priv_kernel(state)) 3305 return (0); 3306 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3307 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3308 3309 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3310
mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3311 } 3312 return (mstate->dtms_stackdepth); 3313 3314 case DIF_VAR_USTACKDEPTH: 3315 if (!dtrace_priv_proc(state)) 3316 return (0); 3317 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3318 /* 3319 * See comment in DIF_VAR_PID. 3320 */ 3321 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3322 CPU_ON_INTR(CPU)) { 3323 mstate->dtms_ustackdepth = 0; 3324 } else { 3325 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3326 mstate->dtms_ustackdepth = 3327 dtrace_getustackdepth(); 3328 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3329 } 3330 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3331 } 3332 return (mstate->dtms_ustackdepth); 3333 3334 case DIF_VAR_CALLER: 3335 if (!dtrace_priv_kernel(state)) 3336 return (0); 3337 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3338 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3339 3340 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3341 /* 3342 * If this is an unanchored probe, we are 3343 * required to go through the slow path: 3344 * dtrace_caller() only guarantees correct 3345 * results for anchored probes. 3346 */ 3347 pc_t caller[2] = {0, 0}; 3348 3349 dtrace_getpcstack(caller, 2, aframes, 3350 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3351 mstate->dtms_caller = caller[1]; 3352 } else if ((mstate->dtms_caller = 3353 dtrace_caller(aframes)) == -1) { 3354 /* 3355 * We have failed to do this the quick way; 3356 * we must resort to the slower approach of 3357 * calling dtrace_getpcstack(). 3358 */ 3359 pc_t caller = 0; 3360 3361 dtrace_getpcstack(&caller, 1, aframes, NULL); 3362 mstate->dtms_caller = caller; 3363 } 3364 3365 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3366 } 3367 return (mstate->dtms_caller); 3368 3369 case DIF_VAR_UCALLER: 3370 if (!dtrace_priv_proc(state)) 3371 return (0); 3372 3373 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3374 uint64_t ustack[3]; 3375 3376 /* 3377 * dtrace_getupcstack() fills in the first uint64_t 3378 * with the current PID. The second uint64_t will 3379 * be the program counter at user-level. The third 3380 * uint64_t will contain the caller, which is what 3381 * we're after. 3382 */ 3383 ustack[2] = 0; 3384 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3385 dtrace_getupcstack(ustack, 3); 3386 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3387 mstate->dtms_ucaller = ustack[2]; 3388 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3389 } 3390 3391 return (mstate->dtms_ucaller); 3392 3393 case DIF_VAR_PROBEPROV: 3394 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3395 return (dtrace_dif_varstr( 3396 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3397 state, mstate)); 3398 3399 case DIF_VAR_PROBEMOD: 3400 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3401 return (dtrace_dif_varstr( 3402 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3403 state, mstate)); 3404 3405 case DIF_VAR_PROBEFUNC: 3406 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3407 return (dtrace_dif_varstr( 3408 (uintptr_t)mstate->dtms_probe->dtpr_func, 3409 state, mstate)); 3410 3411 case DIF_VAR_PROBENAME: 3412 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3413 return (dtrace_dif_varstr( 3414 (uintptr_t)mstate->dtms_probe->dtpr_name, 3415 state, mstate)); 3416 3417 case DIF_VAR_PID: 3418 if (!dtrace_priv_proc(state)) 3419 return (0); 3420 3421 #ifdef illumos 3422 /* 3423 * Note that we are assuming that an unanchored probe is 3424 * always due to a high-level interrupt. (And we're assuming 3425 * that there is only a single high level interrupt.) 
3426 */ 3427 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3428 return (pid0.pid_id); 3429 3430 /* 3431 * It is always safe to dereference one's own t_procp pointer: 3432 * it always points to a valid, allocated proc structure. 3433 * Further, it is always safe to dereference the p_pidp member 3434 * of one's own proc structure. (These are truisms because 3435 * threads and processes don't clean up their own state -- 3436 * they leave that task to whomever reaps them.) 3437 */ 3438 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3439 #else 3440 return ((uint64_t)curproc->p_pid); 3441 #endif 3442 3443 case DIF_VAR_PPID: 3444 if (!dtrace_priv_proc(state)) 3445 return (0); 3446 3447 #ifdef illumos 3448 /* 3449 * See comment in DIF_VAR_PID. 3450 */ 3451 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3452 return (pid0.pid_id); 3453 3454 /* 3455 * It is always safe to dereference one's own t_procp pointer: 3456 * it always points to a valid, allocated proc structure. 3457 * (This is true because threads don't clean up their own 3458 * state -- they leave that task to whomever reaps them.) 3459 */ 3460 return ((uint64_t)curthread->t_procp->p_ppid); 3461 #else 3462 if (curproc->p_pid == proc0.p_pid) 3463 return (curproc->p_pid); 3464 else 3465 return (curproc->p_pptr->p_pid); 3466 #endif 3467 3468 case DIF_VAR_TID: 3469 #ifdef illumos 3470 /* 3471 * See comment in DIF_VAR_PID. 3472 */ 3473 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3474 return (0); 3475 #endif 3476 3477 return ((uint64_t)curthread->t_tid); 3478 3479 case DIF_VAR_EXECARGS: { 3480 struct pargs *p_args = curthread->td_proc->p_args; 3481 3482 if (p_args == NULL) 3483 return (0); 3484 3485 return (dtrace_dif_varstrz( 3486 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3487 } 3488 3489 case DIF_VAR_EXECNAME: 3490 #ifdef illumos 3491 if (!dtrace_priv_proc(state)) 3492 return (0); 3493 3494 /* 3495 * See comment in DIF_VAR_PID. 3496 */ 3497 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3498 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3499 3500 /* 3501 * It is always safe to dereference one's own t_procp pointer: 3502 * it always points to a valid, allocated proc structure. 3503 * (This is true because threads don't clean up their own 3504 * state -- they leave that task to whomever reaps them.) 3505 */ 3506 return (dtrace_dif_varstr( 3507 (uintptr_t)curthread->t_procp->p_user.u_comm, 3508 state, mstate)); 3509 #else 3510 return (dtrace_dif_varstr( 3511 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3512 #endif 3513 3514 case DIF_VAR_ZONENAME: 3515 #ifdef illumos 3516 if (!dtrace_priv_proc(state)) 3517 return (0); 3518 3519 /* 3520 * See comment in DIF_VAR_PID. 3521 */ 3522 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3523 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3524 3525 /* 3526 * It is always safe to dereference one's own t_procp pointer: 3527 * it always points to a valid, allocated proc structure. 3528 * (This is true because threads don't clean up their own 3529 * state -- they leave that task to whomever reaps them.) 3530 */ 3531 return (dtrace_dif_varstr( 3532 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3533 state, mstate)); 3534 #else 3535 return (0); 3536 #endif 3537 3538 case DIF_VAR_UID: 3539 if (!dtrace_priv_proc(state)) 3540 return (0); 3541 3542 #ifdef illumos 3543 /* 3544 * See comment in DIF_VAR_PID.
3545 */ 3546 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3547 return ((uint64_t)p0.p_cred->cr_uid); 3548 3549 /* 3550 * It is always safe to dereference one's own t_procp pointer: 3551 * it always points to a valid, allocated proc structure. 3552 * (This is true because threads don't clean up their own 3553 * state -- they leave that task to whomever reaps them.) 3554 * 3555 * Additionally, it is safe to dereference one's own process 3556 * credential, since this is never NULL after process birth. 3557 */ 3558 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3559 #else 3560 return ((uint64_t)curthread->td_ucred->cr_uid); 3561 #endif 3562 3563 case DIF_VAR_GID: 3564 if (!dtrace_priv_proc(state)) 3565 return (0); 3566 3567 #ifdef illumos 3568 /* 3569 * See comment in DIF_VAR_PID. 3570 */ 3571 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3572 return ((uint64_t)p0.p_cred->cr_gid); 3573 3574 /* 3575 * It is always safe to dereference one's own t_procp pointer: 3576 * it always points to a valid, allocated proc structure. 3577 * (This is true because threads don't clean up their own 3578 * state -- they leave that task to whomever reaps them.) 3579 * 3580 * Additionally, it is safe to dereference one's own process 3581 * credential, since this is never NULL after process birth. 3582 */ 3583 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3584 #else 3585 return ((uint64_t)curthread->td_ucred->cr_gid); 3586 #endif 3587 3588 case DIF_VAR_ERRNO: { 3589 #ifdef illumos 3590 klwp_t *lwp; 3591 if (!dtrace_priv_proc(state)) 3592 return (0); 3593 3594 /* 3595 * See comment in DIF_VAR_PID. 3596 */ 3597 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3598 return (0); 3599 3600 /* 3601 * It is always safe to dereference one's own t_lwp pointer in 3602 * the event that this pointer is non-NULL. (This is true 3603 * because threads and lwps don't clean up their own state -- 3604 * they leave that task to whomever reaps them.) 3605 */ 3606 if ((lwp = curthread->t_lwp) == NULL) 3607 return (0); 3608 3609 return ((uint64_t)lwp->lwp_errno); 3610 #else 3611 return (curthread->td_errno); 3612 #endif 3613 } 3614 #ifndef illumos 3615 case DIF_VAR_CPU: { 3616 return curcpu; 3617 } 3618 #endif 3619 default: 3620 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3621 return (0); 3622 } 3623 } 3624 3625 3626 typedef enum dtrace_json_state { 3627 DTRACE_JSON_REST = 1, 3628 DTRACE_JSON_OBJECT, 3629 DTRACE_JSON_STRING, 3630 DTRACE_JSON_STRING_ESCAPE, 3631 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3632 DTRACE_JSON_COLON, 3633 DTRACE_JSON_COMMA, 3634 DTRACE_JSON_VALUE, 3635 DTRACE_JSON_IDENTIFIER, 3636 DTRACE_JSON_NUMBER, 3637 DTRACE_JSON_NUMBER_FRAC, 3638 DTRACE_JSON_NUMBER_EXP, 3639 DTRACE_JSON_COLLECT_OBJECT 3640 } dtrace_json_state_t; 3641 3642 /* 3643 * This function possesses just enough knowledge about JSON to extract a single 3644 * value from a JSON string and store it in the scratch buffer. It is able 3645 * to extract nested object values, and members of arrays by index. 3646 * 3647 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3648 * be looked up as we descend into the object tree. e.g. 3649 * 3650 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3651 * with nelems = 5. 3652 * 3653 * The run time of this function must be bounded above by strsize to limit the 3654 * amount of work done in probe context. 
As such, it is implemented as a 3655 * simple state machine, reading one character at a time using safe loads 3656 * until we find the requested element, hit a parsing error or run off the 3657 * end of the object or string. 3658 * 3659 * As there is no way for a subroutine to return an error without interrupting 3660 * clause execution, we simply return NULL in the event of a missing key or any 3661 * other error condition. Each NULL return in this function is commented with 3662 * the error condition it represents -- parsing or otherwise. 3663 * 3664 * The set of states for the state machine closely matches the JSON 3665 * specification (http://json.org/). Briefly: 3666 * 3667 * DTRACE_JSON_REST: 3668 * Skip whitespace until we find either a top-level Object, moving 3669 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3670 * 3671 * DTRACE_JSON_OBJECT: 3672 * Locate the next key String in an Object. Sets a flag to denote 3673 * the next String as a key string and moves to DTRACE_JSON_STRING. 3674 * 3675 * DTRACE_JSON_COLON: 3676 * Skip whitespace until we find the colon that separates key Strings 3677 * from their values. Once found, move to DTRACE_JSON_VALUE. 3678 * 3679 * DTRACE_JSON_VALUE: 3680 * Detects the type of the next value (String, Number, Identifier, Object 3681 * or Array) and routes to the states that process that type. Here we also 3682 * deal with the element selector list if we are requested to traverse down 3683 * into the object tree. 3684 * 3685 * DTRACE_JSON_COMMA: 3686 * Skip whitespace until we find the comma that separates key-value pairs 3687 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3688 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3689 * states return to this state at the end of their value, unless otherwise 3690 * noted. 3691 * 3692 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3693 * Processes a Number literal from the JSON, including any exponent 3694 * component that may be present. Numbers are returned as strings, which 3695 * may be passed to strtoll() if an integer is required. 3696 * 3697 * DTRACE_JSON_IDENTIFIER: 3698 * Processes a "true", "false" or "null" literal in the JSON. 3699 * 3700 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3701 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3702 * Processes a String literal from the JSON, whether the String denotes 3703 * a key, a value or part of a larger Object. Handles all escape sequences 3704 * present in the specification, including four-digit unicode characters, 3705 * but merely includes the escape sequence without converting it to the 3706 * actual escaped character. If the String is flagged as a key, we 3707 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3708 * 3709 * DTRACE_JSON_COLLECT_OBJECT: 3710 * This state collects an entire Object (or Array), correctly handling 3711 * embedded strings. If the full element selector list matches this nested 3712 * object, we return the Object in full as a string. If not, we use this 3713 * state to skip to the next value at this level and continue processing. 3714 * 3715 * NOTE: This function uses various macros from strtolctype.h to manipulate 3716 * digit values, etc -- these have all been checked to ensure they make 3717 * no additional function calls. 
3718 */ 3719 static char * 3720 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, 3721 char *dest) 3722 { 3723 dtrace_json_state_t state = DTRACE_JSON_REST; 3724 int64_t array_elem = INT64_MIN; 3725 int64_t array_pos = 0; 3726 uint8_t escape_unicount = 0; 3727 boolean_t string_is_key = B_FALSE; 3728 boolean_t collect_object = B_FALSE; 3729 boolean_t found_key = B_FALSE; 3730 boolean_t in_array = B_FALSE; 3731 uint32_t braces = 0, brackets = 0; 3732 char *elem = elemlist; 3733 char *dd = dest; 3734 uintptr_t cur; 3735 3736 for (cur = json; cur < json + size; cur++) { 3737 char cc = dtrace_load8(cur); 3738 if (cc == '\0') 3739 return (NULL); 3740 3741 switch (state) { 3742 case DTRACE_JSON_REST: 3743 if (isspace(cc)) 3744 break; 3745 3746 if (cc == '{') { 3747 state = DTRACE_JSON_OBJECT; 3748 break; 3749 } 3750 3751 if (cc == '[') { 3752 in_array = B_TRUE; 3753 array_pos = 0; 3754 array_elem = dtrace_strtoll(elem, 10, size); 3755 found_key = array_elem == 0 ? B_TRUE : B_FALSE; 3756 state = DTRACE_JSON_VALUE; 3757 break; 3758 } 3759 3760 /* 3761 * ERROR: expected to find a top-level object or array. 3762 */ 3763 return (NULL); 3764 case DTRACE_JSON_OBJECT: 3765 if (isspace(cc)) 3766 break; 3767 3768 if (cc == '"') { 3769 state = DTRACE_JSON_STRING; 3770 string_is_key = B_TRUE; 3771 break; 3772 } 3773 3774 /* 3775 * ERROR: either the object did not start with a key 3776 * string, or we've run off the end of the object 3777 * without finding the requested key. 3778 */ 3779 return (NULL); 3780 case DTRACE_JSON_STRING: 3781 if (cc == '\\') { 3782 *dd++ = '\\'; 3783 state = DTRACE_JSON_STRING_ESCAPE; 3784 break; 3785 } 3786 3787 if (cc == '"') { 3788 if (collect_object) { 3789 /* 3790 * We don't reset the dest here, as 3791 * the string is part of a larger 3792 * object being collected. 3793 */ 3794 *dd++ = cc; 3795 collect_object = B_FALSE; 3796 state = DTRACE_JSON_COLLECT_OBJECT; 3797 break; 3798 } 3799 *dd = '\0'; 3800 dd = dest; /* reset string buffer */ 3801 if (string_is_key) { 3802 if (dtrace_strncmp(dest, elem, 3803 size) == 0) 3804 found_key = B_TRUE; 3805 } else if (found_key) { 3806 if (nelems > 1) { 3807 /* 3808 * We expected an object, not 3809 * this string. 3810 */ 3811 return (NULL); 3812 } 3813 return (dest); 3814 } 3815 state = string_is_key ? DTRACE_JSON_COLON : 3816 DTRACE_JSON_COMMA; 3817 string_is_key = B_FALSE; 3818 break; 3819 } 3820 3821 *dd++ = cc; 3822 break; 3823 case DTRACE_JSON_STRING_ESCAPE: 3824 *dd++ = cc; 3825 if (cc == 'u') { 3826 escape_unicount = 0; 3827 state = DTRACE_JSON_STRING_ESCAPE_UNICODE; 3828 } else { 3829 state = DTRACE_JSON_STRING; 3830 } 3831 break; 3832 case DTRACE_JSON_STRING_ESCAPE_UNICODE: 3833 if (!isxdigit(cc)) { 3834 /* 3835 * ERROR: invalid unicode escape, expected 3836 * four valid hexadecimal digits. 3837 */ 3838 return (NULL); 3839 } 3840 3841 *dd++ = cc; 3842 if (++escape_unicount == 4) 3843 state = DTRACE_JSON_STRING; 3844 break; 3845 case DTRACE_JSON_COLON: 3846 if (isspace(cc)) 3847 break; 3848 3849 if (cc == ':') { 3850 state = DTRACE_JSON_VALUE; 3851 break; 3852 } 3853 3854 /* 3855 * ERROR: expected a colon.
3856 */ 3857 return (NULL); 3858 case DTRACE_JSON_COMMA: 3859 if (isspace(cc)) 3860 break; 3861 3862 if (cc == ',') { 3863 if (in_array) { 3864 state = DTRACE_JSON_VALUE; 3865 if (++array_pos == array_elem) 3866 found_key = B_TRUE; 3867 } else { 3868 state = DTRACE_JSON_OBJECT; 3869 } 3870 break; 3871 } 3872 3873 /* 3874 * ERROR: either we hit an unexpected character, or 3875 * we reached the end of the object or array without 3876 * finding the requested key. 3877 */ 3878 return (NULL); 3879 case DTRACE_JSON_IDENTIFIER: 3880 if (islower(cc)) { 3881 *dd++ = cc; 3882 break; 3883 } 3884 3885 *dd = '\0'; 3886 dd = dest; /* reset string buffer */ 3887 3888 if (dtrace_strncmp(dest, "true", 5) == 0 || 3889 dtrace_strncmp(dest, "false", 6) == 0 || 3890 dtrace_strncmp(dest, "null", 5) == 0) { 3891 if (found_key) { 3892 if (nelems > 1) { 3893 /* 3894 * ERROR: We expected an object, 3895 * not this identifier. 3896 */ 3897 return (NULL); 3898 } 3899 return (dest); 3900 } else { 3901 cur--; 3902 state = DTRACE_JSON_COMMA; 3903 break; 3904 } 3905 } 3906 3907 /* 3908 * ERROR: we did not recognize the identifier as one 3909 * of those in the JSON specification. 3910 */ 3911 return (NULL); 3912 case DTRACE_JSON_NUMBER: 3913 if (cc == '.') { 3914 *dd++ = cc; 3915 state = DTRACE_JSON_NUMBER_FRAC; 3916 break; 3917 } 3918 3919 if (cc == 'x' || cc == 'X') { 3920 /* 3921 * ERROR: specification explicitly excludes 3922 * hexadecimal or octal numbers. 3923 */ 3924 return (NULL); 3925 } 3926 3927 /* FALLTHRU */ 3928 case DTRACE_JSON_NUMBER_FRAC: 3929 if (cc == 'e' || cc == 'E') { 3930 *dd++ = cc; 3931 state = DTRACE_JSON_NUMBER_EXP; 3932 break; 3933 } 3934 3935 if (cc == '+' || cc == '-') { 3936 /* 3937 * ERROR: expect sign as part of exponent only. 3938 */ 3939 return (NULL); 3940 } 3941 /* FALLTHRU */ 3942 case DTRACE_JSON_NUMBER_EXP: 3943 if (isdigit(cc) || cc == '+' || cc == '-') { 3944 *dd++ = cc; 3945 break; 3946 } 3947 3948 *dd = '\0'; 3949 dd = dest; /* reset string buffer */ 3950 if (found_key) { 3951 if (nelems > 1) { 3952 /* 3953 * ERROR: We expected an object, not 3954 * this number. 3955 */ 3956 return (NULL); 3957 } 3958 return (dest); 3959 } 3960 3961 cur--; 3962 state = DTRACE_JSON_COMMA; 3963 break; 3964 case DTRACE_JSON_VALUE: 3965 if (isspace(cc)) 3966 break; 3967 3968 if (cc == '{' || cc == '[') { 3969 if (nelems > 1 && found_key) { 3970 in_array = cc == '[' ? B_TRUE : B_FALSE; 3971 /* 3972 * If our element selector directs us 3973 * to descend into this nested object, 3974 * then move to the next selector 3975 * element in the list and restart the 3976 * state machine. 3977 */ 3978 while (*elem != '\0') 3979 elem++; 3980 elem++; /* skip the inter-element NUL */ 3981 nelems--; 3982 dd = dest; 3983 if (in_array) { 3984 state = DTRACE_JSON_VALUE; 3985 array_pos = 0; 3986 array_elem = dtrace_strtoll( 3987 elem, 10, size); 3988 found_key = array_elem == 0 ? 3989 B_TRUE : B_FALSE; 3990 } else { 3991 found_key = B_FALSE; 3992 state = DTRACE_JSON_OBJECT; 3993 } 3994 break; 3995 } 3996 3997 /* 3998 * Otherwise, we wish to either skip this 3999 * nested object or return it in full. 4000 */ 4001 if (cc == '[') 4002 brackets = 1; 4003 else 4004 braces = 1; 4005 *dd++ = cc; 4006 state = DTRACE_JSON_COLLECT_OBJECT; 4007 break; 4008 } 4009 4010 if (cc == '"') { 4011 state = DTRACE_JSON_STRING; 4012 break; 4013 } 4014 4015 if (islower(cc)) { 4016 /* 4017 * Here we deal with true, false and null.
4018 */ 4019 *dd++ = cc; 4020 state = DTRACE_JSON_IDENTIFIER; 4021 break; 4022 } 4023 4024 if (cc == '-' || isdigit(cc)) { 4025 *dd++ = cc; 4026 state = DTRACE_JSON_NUMBER; 4027 break; 4028 } 4029 4030 /* 4031 * ERROR: unexpected character at start of value. 4032 */ 4033 return (NULL); 4034 case DTRACE_JSON_COLLECT_OBJECT: 4035 if (cc == '\0') 4036 /* 4037 * ERROR: unexpected end of input. 4038 */ 4039 return (NULL); 4040 4041 *dd++ = cc; 4042 if (cc == '"') { 4043 collect_object = B_TRUE; 4044 state = DTRACE_JSON_STRING; 4045 break; 4046 } 4047 4048 if (cc == ']') { 4049 if (brackets-- == 0) { 4050 /* 4051 * ERROR: unbalanced brackets. 4052 */ 4053 return (NULL); 4054 } 4055 } else if (cc == '}') { 4056 if (braces-- == 0) { 4057 /* 4058 * ERROR: unbalanced braces. 4059 */ 4060 return (NULL); 4061 } 4062 } else if (cc == '{') { 4063 braces++; 4064 } else if (cc == '[') { 4065 brackets++; 4066 } 4067 4068 if (brackets == 0 && braces == 0) { 4069 if (found_key) { 4070 *dd = '\0'; 4071 return (dest); 4072 } 4073 dd = dest; /* reset string buffer */ 4074 state = DTRACE_JSON_COMMA; 4075 } 4076 break; 4077 } 4078 } 4079 return (NULL); 4080 } 4081 4082 /* 4083 * Emulate the execution of DTrace DIF subroutines invoked by the call opcode. 4084 * Notice that we don't bother validating the proper number of arguments or 4085 * their types in the tuple stack. This isn't needed: all argument 4086 * interpretation is safe because of our load safety -- the worst that can 4087 * happen is that a bogus program can obtain bogus results. 4088 */ 4089 static void 4090 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4091 dtrace_key_t *tupregs, int nargs, 4092 dtrace_mstate_t *mstate, dtrace_state_t *state) 4093 { 4094 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4095 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4096 dtrace_vstate_t *vstate = &state->dts_vstate; 4097 4098 #ifdef illumos 4099 union { 4100 mutex_impl_t mi; 4101 uint64_t mx; 4102 } m; 4103 4104 union { 4105 krwlock_t ri; 4106 uintptr_t rw; 4107 } r; 4108 #else 4109 struct thread *lowner; 4110 union { 4111 struct lock_object *li; 4112 uintptr_t lx; 4113 } l; 4114 #endif 4115 4116 switch (subr) { 4117 case DIF_SUBR_RAND: 4118 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 4119 break; 4120 4121 #ifdef illumos 4122 case DIF_SUBR_MUTEX_OWNED: 4123 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4124 mstate, vstate)) { 4125 regs[rd] = 0; 4126 break; 4127 } 4128 4129 m.mx = dtrace_load64(tupregs[0].dttk_value); 4130 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4131 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4132 else 4133 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4134 break; 4135 4136 case DIF_SUBR_MUTEX_OWNER: 4137 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4138 mstate, vstate)) { 4139 regs[rd] = 0; 4140 break; 4141 } 4142 4143 m.mx = dtrace_load64(tupregs[0].dttk_value); 4144 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4145 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4146 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4147 else 4148 regs[rd] = 0; 4149 break; 4150 4151 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4152 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4153 mstate, vstate)) { 4154 regs[rd] = 0; 4155 break; 4156 } 4157 4158 m.mx = dtrace_load64(tupregs[0].dttk_value); 4159 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4160 break; 4161 4162 case DIF_SUBR_MUTEX_TYPE_SPIN: 4163 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4164 mstate, vstate)) { 4165
regs[rd] = 0; 4166 break; 4167 } 4168 4169 m.mx = dtrace_load64(tupregs[0].dttk_value); 4170 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4171 break; 4172 4173 case DIF_SUBR_RW_READ_HELD: { 4174 uintptr_t tmp; 4175 4176 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4177 mstate, vstate)) { 4178 regs[rd] = 0; 4179 break; 4180 } 4181 4182 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4183 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4184 break; 4185 } 4186 4187 case DIF_SUBR_RW_WRITE_HELD: 4188 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4189 mstate, vstate)) { 4190 regs[rd] = 0; 4191 break; 4192 } 4193 4194 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4195 regs[rd] = _RW_WRITE_HELD(&r.ri); 4196 break; 4197 4198 case DIF_SUBR_RW_ISWRITER: 4199 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4200 mstate, vstate)) { 4201 regs[rd] = 0; 4202 break; 4203 } 4204 4205 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4206 regs[rd] = _RW_ISWRITER(&r.ri); 4207 break; 4208 4209 #else /* !illumos */ 4210 case DIF_SUBR_MUTEX_OWNED: 4211 if (!dtrace_canload(tupregs[0].dttk_value, 4212 sizeof (struct lock_object), mstate, vstate)) { 4213 regs[rd] = 0; 4214 break; 4215 } 4216 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4217 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4218 break; 4219 4220 case DIF_SUBR_MUTEX_OWNER: 4221 if (!dtrace_canload(tupregs[0].dttk_value, 4222 sizeof (struct lock_object), mstate, vstate)) { 4223 regs[rd] = 0; 4224 break; 4225 } 4226 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4227 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4228 regs[rd] = (uintptr_t)lowner; 4229 break; 4230 4231 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4232 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4233 mstate, vstate)) { 4234 regs[rd] = 0; 4235 break; 4236 } 4237 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4238 /* XXX - should be only LC_SLEEPABLE? 
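 * (For reference, and as an assumption about the FreeBSD lock classes:
 * sleep mutexes and rwlocks carry only LC_SLEEPLOCK, while sx-style
 * locks that may be held across a sleep also carry LC_SLEEPABLE.)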
*/ 4239 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 4240 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 4241 break; 4242 4243 case DIF_SUBR_MUTEX_TYPE_SPIN: 4244 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4245 mstate, vstate)) { 4246 regs[rd] = 0; 4247 break; 4248 } 4249 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4250 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4251 break; 4252 4253 case DIF_SUBR_RW_READ_HELD: 4254 case DIF_SUBR_SX_SHARED_HELD: 4255 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4256 mstate, vstate)) { 4257 regs[rd] = 0; 4258 break; 4259 } 4260 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4261 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4262 lowner == NULL; 4263 break; 4264 4265 case DIF_SUBR_RW_WRITE_HELD: 4266 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4267 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4268 mstate, vstate)) { 4269 regs[rd] = 0; 4270 break; 4271 } 4272 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4273 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4274 regs[rd] = (lowner == curthread); 4275 break; 4276 4277 case DIF_SUBR_RW_ISWRITER: 4278 case DIF_SUBR_SX_ISEXCLUSIVE: 4279 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4280 mstate, vstate)) { 4281 regs[rd] = 0; 4282 break; 4283 } 4284 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4285 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4286 lowner != NULL; 4287 break; 4288 #endif /* illumos */ 4289 4290 case DIF_SUBR_BCOPY: { 4291 /* 4292 * We need to be sure that the destination is in the scratch 4293 * region -- no other region is allowed. 4294 */ 4295 uintptr_t src = tupregs[0].dttk_value; 4296 uintptr_t dest = tupregs[1].dttk_value; 4297 size_t size = tupregs[2].dttk_value; 4298 4299 if (!dtrace_inscratch(dest, size, mstate)) { 4300 *flags |= CPU_DTRACE_BADADDR; 4301 *illval = regs[rd]; 4302 break; 4303 } 4304 4305 if (!dtrace_canload(src, size, mstate, vstate)) { 4306 regs[rd] = 0; 4307 break; 4308 } 4309 4310 dtrace_bcopy((void *)src, (void *)dest, size); 4311 break; 4312 } 4313 4314 case DIF_SUBR_ALLOCA: 4315 case DIF_SUBR_COPYIN: { 4316 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4317 uint64_t size = 4318 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4319 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4320 4321 /* 4322 * This action doesn't require any credential checks since 4323 * probes will not activate in user contexts to which the 4324 * enabling user does not have permissions. 4325 */ 4326 4327 /* 4328 * Rounding up the user allocation size could have overflowed 4329 * a large, bogus allocation (like -1ULL) to 0. 4330 */ 4331 if (scratch_size < size || 4332 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4333 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4334 regs[rd] = 0; 4335 break; 4336 } 4337 4338 if (subr == DIF_SUBR_COPYIN) { 4339 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4340 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4341 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4342 } 4343 4344 mstate->dtms_scratch_ptr += scratch_size; 4345 regs[rd] = dest; 4346 break; 4347 } 4348 4349 case DIF_SUBR_COPYINTO: { 4350 uint64_t size = tupregs[1].dttk_value; 4351 uintptr_t dest = tupregs[2].dttk_value; 4352 4353 /* 4354 * This action doesn't require any credential checks since 4355 * probes will not activate in user contexts to which the 4356 * enabling user does not have permissions. 
4357 */ 4358 if (!dtrace_inscratch(dest, size, mstate)) { 4359 *flags |= CPU_DTRACE_BADADDR; 4360 *illval = regs[rd]; 4361 break; 4362 } 4363 4364 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4365 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4366 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4367 break; 4368 } 4369 4370 case DIF_SUBR_COPYINSTR: { 4371 uintptr_t dest = mstate->dtms_scratch_ptr; 4372 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4373 4374 if (nargs > 1 && tupregs[1].dttk_value < size) 4375 size = tupregs[1].dttk_value + 1; 4376 4377 /* 4378 * This action doesn't require any credential checks since 4379 * probes will not activate in user contexts to which the 4380 * enabling user does not have permissions. 4381 */ 4382 if (!DTRACE_INSCRATCH(mstate, size)) { 4383 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4384 regs[rd] = 0; 4385 break; 4386 } 4387 4388 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4389 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 4390 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4391 4392 ((char *)dest)[size - 1] = '\0'; 4393 mstate->dtms_scratch_ptr += size; 4394 regs[rd] = dest; 4395 break; 4396 } 4397 4398 #ifdef illumos 4399 case DIF_SUBR_MSGSIZE: 4400 case DIF_SUBR_MSGDSIZE: { 4401 uintptr_t baddr = tupregs[0].dttk_value, daddr; 4402 uintptr_t wptr, rptr; 4403 size_t count = 0; 4404 int cont = 0; 4405 4406 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 4407 4408 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 4409 vstate)) { 4410 regs[rd] = 0; 4411 break; 4412 } 4413 4414 wptr = dtrace_loadptr(baddr + 4415 offsetof(mblk_t, b_wptr)); 4416 4417 rptr = dtrace_loadptr(baddr + 4418 offsetof(mblk_t, b_rptr)); 4419 4420 if (wptr < rptr) { 4421 *flags |= CPU_DTRACE_BADADDR; 4422 *illval = tupregs[0].dttk_value; 4423 break; 4424 } 4425 4426 daddr = dtrace_loadptr(baddr + 4427 offsetof(mblk_t, b_datap)); 4428 4429 baddr = dtrace_loadptr(baddr + 4430 offsetof(mblk_t, b_cont)); 4431 4432 /* 4433 * We want to protect against denial-of-service here, 4434 * so we're only going to search the list for 4435 * dtrace_msgdsize_max mblks.
4436 */ 4437 if (cont++ > dtrace_msgdsize_max) { 4438 *flags |= CPU_DTRACE_ILLOP; 4439 break; 4440 } 4441 4442 if (subr == DIF_SUBR_MSGDSIZE) { 4443 if (dtrace_load8(daddr + 4444 offsetof(dblk_t, db_type)) != M_DATA) 4445 continue; 4446 } 4447 4448 count += wptr - rptr; 4449 } 4450 4451 if (!(*flags & CPU_DTRACE_FAULT)) 4452 regs[rd] = count; 4453 4454 break; 4455 } 4456 #endif 4457 4458 case DIF_SUBR_PROGENYOF: { 4459 pid_t pid = tupregs[0].dttk_value; 4460 proc_t *p; 4461 int rval = 0; 4462 4463 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4464 4465 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 4466 #ifdef illumos 4467 if (p->p_pidp->pid_id == pid) { 4468 #else 4469 if (p->p_pid == pid) { 4470 #endif 4471 rval = 1; 4472 break; 4473 } 4474 } 4475 4476 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4477 4478 regs[rd] = rval; 4479 break; 4480 } 4481 4482 case DIF_SUBR_SPECULATION: 4483 regs[rd] = dtrace_speculation(state); 4484 break; 4485 4486 case DIF_SUBR_COPYOUT: { 4487 uintptr_t kaddr = tupregs[0].dttk_value; 4488 uintptr_t uaddr = tupregs[1].dttk_value; 4489 uint64_t size = tupregs[2].dttk_value; 4490 4491 if (!dtrace_destructive_disallow && 4492 dtrace_priv_proc_control(state) && 4493 !dtrace_istoxic(kaddr, size) && 4494 dtrace_canload(kaddr, size, mstate, vstate)) { 4495 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4496 dtrace_copyout(kaddr, uaddr, size, flags); 4497 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4498 } 4499 break; 4500 } 4501 4502 case DIF_SUBR_COPYOUTSTR: { 4503 uintptr_t kaddr = tupregs[0].dttk_value; 4504 uintptr_t uaddr = tupregs[1].dttk_value; 4505 uint64_t size = tupregs[2].dttk_value; 4506 4507 if (!dtrace_destructive_disallow && 4508 dtrace_priv_proc_control(state) && 4509 !dtrace_istoxic(kaddr, size) && 4510 dtrace_strcanload(kaddr, size, mstate, vstate)) { 4511 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4512 dtrace_copyoutstr(kaddr, uaddr, size, flags); 4513 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4514 } 4515 break; 4516 } 4517 4518 case DIF_SUBR_STRLEN: { 4519 size_t sz; 4520 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 4521 sz = dtrace_strlen((char *)addr, 4522 state->dts_options[DTRACEOPT_STRSIZE]); 4523 4524 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 4525 regs[rd] = 0; 4526 break; 4527 } 4528 4529 regs[rd] = sz; 4530 4531 break; 4532 } 4533 4534 case DIF_SUBR_STRCHR: 4535 case DIF_SUBR_STRRCHR: { 4536 /* 4537 * We're going to iterate over the string looking for the 4538 * specified character. We will iterate until we have reached 4539 * the string length or we have found the character. If this 4540 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4541 * of the specified character instead of the first. 4542 */ 4543 uintptr_t saddr = tupregs[0].dttk_value; 4544 uintptr_t addr = tupregs[0].dttk_value; 4545 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 4546 char c, target = (char)tupregs[1].dttk_value; 4547 4548 for (regs[rd] = 0; addr < limit; addr++) { 4549 if ((c = dtrace_load8(addr)) == target) { 4550 regs[rd] = addr; 4551 4552 if (subr == DIF_SUBR_STRCHR) 4553 break; 4554 } 4555 4556 if (c == '\0') 4557 break; 4558 } 4559 4560 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 4561 regs[rd] = 0; 4562 break; 4563 } 4564 4565 break; 4566 } 4567 4568 case DIF_SUBR_STRSTR: 4569 case DIF_SUBR_INDEX: 4570 case DIF_SUBR_RINDEX: { 4571 /* 4572 * We're going to iterate over the string looking for the 4573 * specified string. 
We will iterate until we have reached 4574 * the string length or we have found the string. (Yes, this 4575 * is done in the most naive way possible -- but considering 4576 * that the string we're searching for is likely to be 4577 * relatively short, the complexity of Rabin-Karp or similar 4578 * hardly seems merited.) 4579 */ 4580 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4581 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4582 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4583 size_t len = dtrace_strlen(addr, size); 4584 size_t sublen = dtrace_strlen(substr, size); 4585 char *limit = addr + len, *orig = addr; 4586 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4587 int inc = 1; 4588 4589 regs[rd] = notfound; 4590 4591 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4592 regs[rd] = 0; 4593 break; 4594 } 4595 4596 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4597 vstate)) { 4598 regs[rd] = 0; 4599 break; 4600 } 4601 4602 /* 4603 * strstr() and index()/rindex() have similar semantics if 4604 * both strings are the empty string: strstr() returns a 4605 * pointer to the (empty) string, and index() and rindex() 4606 * both return index 0 (regardless of any position argument). 4607 */ 4608 if (sublen == 0 && len == 0) { 4609 if (subr == DIF_SUBR_STRSTR) 4610 regs[rd] = (uintptr_t)addr; 4611 else 4612 regs[rd] = 0; 4613 break; 4614 } 4615 4616 if (subr != DIF_SUBR_STRSTR) { 4617 if (subr == DIF_SUBR_RINDEX) { 4618 limit = orig - 1; 4619 addr += len; 4620 inc = -1; 4621 } 4622 4623 /* 4624 * Both index() and rindex() take an optional position 4625 * argument that denotes the starting position. 4626 */ 4627 if (nargs == 3) { 4628 int64_t pos = (int64_t)tupregs[2].dttk_value; 4629 4630 /* 4631 * If the position argument to index() is 4632 * negative, Perl implicitly clamps it at 4633 * zero. This semantic is a little surprising 4634 * given the special meaning of negative 4635 * positions to similar Perl functions like 4636 * substr(), but it appears to reflect a 4637 * notion that index() can start from a 4638 * negative index and increment its way up to 4639 * the string. Given this notion, Perl's 4640 * rindex() is at least self-consistent in 4641 * that it implicitly clamps positions greater 4642 * than the string length to be the string 4643 * length. Where Perl completely loses 4644 * coherence, however, is when the specified 4645 * substring is the empty string (""). In 4646 * this case, even if the position is 4647 * negative, rindex() returns 0 -- and even if 4648 * the position is greater than the length, 4649 * index() returns the string length. These 4650 * semantics violate the notion that index() 4651 * should never return a value less than the 4652 * specified position and that rindex() should 4653 * never return a value greater than the 4654 * specified position. (One assumes that 4655 * these semantics are artifacts of Perl's 4656 * implementation and not the results of 4657 * deliberate design -- it beggars belief that 4658 * even Larry Wall could desire such oddness.) 4659 * While in the abstract one would wish for 4660 * consistent position semantics across 4661 * substr(), index() and rindex() -- or at the 4662 * very least self-consistent position 4663 * semantics for index() and rindex() -- we 4664 * instead opt to keep with the extant Perl 4665 * semantics, in all their broken glory. (Do 4666 * we have more desire to maintain Perl's 4667 * semantics than Perl does? Probably.) 
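 * Concretely (an illustrative pair of cases): index("abc", "", 5)
 * evaluates to 3 (the string length) and rindex("abc", "", -1)
 * evaluates to 0.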
4668 */ 4669 if (subr == DIF_SUBR_RINDEX) { 4670 if (pos < 0) { 4671 if (sublen == 0) 4672 regs[rd] = 0; 4673 break; 4674 } 4675 4676 if (pos > len) 4677 pos = len; 4678 } else { 4679 if (pos < 0) 4680 pos = 0; 4681 4682 if (pos >= len) { 4683 if (sublen == 0) 4684 regs[rd] = len; 4685 break; 4686 } 4687 } 4688 4689 addr = orig + pos; 4690 } 4691 } 4692 4693 for (regs[rd] = notfound; addr != limit; addr += inc) { 4694 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4695 if (subr != DIF_SUBR_STRSTR) { 4696 /* 4697 * As D index() and rindex() are 4698 * modeled on Perl (and not on awk), 4699 * we return a zero-based (and not a 4700 * one-based) index. (For you Perl 4701 * weenies: no, we're not going to add 4702 * $[ -- and shouldn't you be at a con 4703 * or something?) 4704 */ 4705 regs[rd] = (uintptr_t)(addr - orig); 4706 break; 4707 } 4708 4709 ASSERT(subr == DIF_SUBR_STRSTR); 4710 regs[rd] = (uintptr_t)addr; 4711 break; 4712 } 4713 } 4714 4715 break; 4716 } 4717 4718 case DIF_SUBR_STRTOK: { 4719 uintptr_t addr = tupregs[0].dttk_value; 4720 uintptr_t tokaddr = tupregs[1].dttk_value; 4721 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4722 uintptr_t limit, toklimit = tokaddr + size; 4723 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4724 char *dest = (char *)mstate->dtms_scratch_ptr; 4725 int i; 4726 4727 /* 4728 * Check both the token buffer and (later) the input buffer, 4729 * since both could be non-scratch addresses. 4730 */ 4731 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 4732 regs[rd] = 0; 4733 break; 4734 } 4735 4736 if (!DTRACE_INSCRATCH(mstate, size)) { 4737 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4738 regs[rd] = 0; 4739 break; 4740 } 4741 4742 if (addr == 0) { 4743 /* 4744 * If the address specified is NULL, we use our saved 4745 * strtok pointer from the mstate. Note that this 4746 * means that the saved strtok pointer is _only_ 4747 * valid within multiple enablings of the same probe -- 4748 * it behaves like an implicit clause-local variable. 4749 */ 4750 addr = mstate->dtms_strtok; 4751 } else { 4752 /* 4753 * If the user-specified address is non-NULL we must 4754 * access check it. This is the only time we have 4755 * a chance to do so, since this address may reside 4756 * in the string table of this clause -- future calls 4757 * (when we fetch addr from mstate->dtms_strtok) 4758 * would fail this access check. 4759 */ 4760 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 4761 regs[rd] = 0; 4762 break; 4763 } 4764 } 4765 4766 /* 4767 * First, zero the token map, and then process the token 4768 * string -- setting a bit in the map for every character 4769 * found in the token string. 4770 */ 4771 for (i = 0; i < sizeof (tokmap); i++) 4772 tokmap[i] = 0; 4773 4774 for (; tokaddr < toklimit; tokaddr++) { 4775 if ((c = dtrace_load8(tokaddr)) == '\0') 4776 break; 4777 4778 ASSERT((c >> 3) < sizeof (tokmap)); 4779 tokmap[c >> 3] |= (1 << (c & 0x7)); 4780 } 4781 4782 for (limit = addr + size; addr < limit; addr++) { 4783 /* 4784 * We're looking for a character that is _not_ contained 4785 * in the token string. 4786 */ 4787 if ((c = dtrace_load8(addr)) == '\0') 4788 break; 4789 4790 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4791 break; 4792 } 4793 4794 if (c == '\0') { 4795 /* 4796 * We reached the end of the string without finding 4797 * any character that was not in the token string. 4798 * We return NULL in this case, and we set the saved 4799 * address to NULL as well.
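 * (For example, a call like strtok("///", "/") returns NULL
 * immediately, since every character of the input is in the token
 * string.)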
4800 */ 4801 regs[rd] = 0; 4802 mstate->dtms_strtok = 0; 4803 break; 4804 } 4805 4806 /* 4807 * From here on, we're copying into the destination string. 4808 */ 4809 for (i = 0; addr < limit && i < size - 1; addr++) { 4810 if ((c = dtrace_load8(addr)) == '\0') 4811 break; 4812 4813 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4814 break; 4815 4816 ASSERT(i < size); 4817 dest[i++] = c; 4818 } 4819 4820 ASSERT(i < size); 4821 dest[i] = '\0'; 4822 regs[rd] = (uintptr_t)dest; 4823 mstate->dtms_scratch_ptr += size; 4824 mstate->dtms_strtok = addr; 4825 break; 4826 } 4827 4828 case DIF_SUBR_SUBSTR: { 4829 uintptr_t s = tupregs[0].dttk_value; 4830 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4831 char *d = (char *)mstate->dtms_scratch_ptr; 4832 int64_t index = (int64_t)tupregs[1].dttk_value; 4833 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4834 size_t len = dtrace_strlen((char *)s, size); 4835 int64_t i; 4836 4837 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4838 regs[rd] = 0; 4839 break; 4840 } 4841 4842 if (!DTRACE_INSCRATCH(mstate, size)) { 4843 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4844 regs[rd] = 0; 4845 break; 4846 } 4847 4848 if (nargs <= 2) 4849 remaining = (int64_t)size; 4850 4851 if (index < 0) { 4852 index += len; 4853 4854 if (index < 0 && index + remaining > 0) { 4855 remaining += index; 4856 index = 0; 4857 } 4858 } 4859 4860 if (index >= len || index < 0) { 4861 remaining = 0; 4862 } else if (remaining < 0) { 4863 remaining += len - index; 4864 } else if (index + remaining > size) { 4865 remaining = size - index; 4866 } 4867 4868 for (i = 0; i < remaining; i++) { 4869 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4870 break; 4871 } 4872 4873 d[i] = '\0'; 4874 4875 mstate->dtms_scratch_ptr += size; 4876 regs[rd] = (uintptr_t)d; 4877 break; 4878 } 4879 4880 case DIF_SUBR_JSON: { 4881 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4882 uintptr_t json = tupregs[0].dttk_value; 4883 size_t jsonlen = dtrace_strlen((char *)json, size); 4884 uintptr_t elem = tupregs[1].dttk_value; 4885 size_t elemlen = dtrace_strlen((char *)elem, size); 4886 4887 char *dest = (char *)mstate->dtms_scratch_ptr; 4888 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 4889 char *ee = elemlist; 4890 int nelems = 1; 4891 uintptr_t cur; 4892 4893 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 4894 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 4895 regs[rd] = 0; 4896 break; 4897 } 4898 4899 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 4900 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4901 regs[rd] = 0; 4902 break; 4903 } 4904 4905 /* 4906 * Read the element selector and split it up into a packed list 4907 * of strings. 4908 */ 4909 for (cur = elem; cur < elem + elemlen; cur++) { 4910 char cc = dtrace_load8(cur); 4911 4912 if (cur == elem && cc == '[') { 4913 /* 4914 * If the first element selector key is 4915 * actually an array index then ignore the 4916 * bracket. 4917 */ 4918 continue; 4919 } 4920 4921 if (cc == ']') 4922 continue; 4923 4924 if (cc == '.' 
|| cc == '[') { 4925 nelems++; 4926 cc = '\0'; 4927 } 4928 4929 *ee++ = cc; 4930 } 4931 *ee++ = '\0'; 4932 4933 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 4934 nelems, dest)) != 0) 4935 mstate->dtms_scratch_ptr += jsonlen + 1; 4936 break; 4937 } 4938 4939 case DIF_SUBR_TOUPPER: 4940 case DIF_SUBR_TOLOWER: { 4941 uintptr_t s = tupregs[0].dttk_value; 4942 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4943 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4944 size_t len = dtrace_strlen((char *)s, size); 4945 char lower, upper, convert; 4946 int64_t i; 4947 4948 if (subr == DIF_SUBR_TOUPPER) { 4949 lower = 'a'; 4950 upper = 'z'; 4951 convert = 'A'; 4952 } else { 4953 lower = 'A'; 4954 upper = 'Z'; 4955 convert = 'a'; 4956 } 4957 4958 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4959 regs[rd] = 0; 4960 break; 4961 } 4962 4963 if (!DTRACE_INSCRATCH(mstate, size)) { 4964 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4965 regs[rd] = 0; 4966 break; 4967 } 4968 4969 for (i = 0; i < size - 1; i++) { 4970 if ((c = dtrace_load8(s + i)) == '\0') 4971 break; 4972 4973 if (c >= lower && c <= upper) 4974 c = convert + (c - lower); 4975 4976 dest[i] = c; 4977 } 4978 4979 ASSERT(i < size); 4980 dest[i] = '\0'; 4981 regs[rd] = (uintptr_t)dest; 4982 mstate->dtms_scratch_ptr += size; 4983 break; 4984 } 4985 4986 #ifdef illumos 4987 case DIF_SUBR_GETMAJOR: 4988 #ifdef _LP64 4989 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4990 #else 4991 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4992 #endif 4993 break; 4994 4995 case DIF_SUBR_GETMINOR: 4996 #ifdef _LP64 4997 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4998 #else 4999 regs[rd] = tupregs[0].dttk_value & MAXMIN; 5000 #endif 5001 break; 5002 5003 case DIF_SUBR_DDI_PATHNAME: { 5004 /* 5005 * This one is a galactic mess. We are going to roughly 5006 * emulate ddi_pathname(), but it's made more complicated 5007 * by the fact that we (a) want to include the minor name and 5008 * (b) must proceed iteratively instead of recursively. 5009 */ 5010 uintptr_t dest = mstate->dtms_scratch_ptr; 5011 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5012 char *start = (char *)dest, *end = start + size - 1; 5013 uintptr_t daddr = tupregs[0].dttk_value; 5014 int64_t minor = (int64_t)tupregs[1].dttk_value; 5015 char *s; 5016 int i, len, depth = 0; 5017 5018 /* 5019 * Due to all the pointer jumping we do and context we must 5020 * rely upon, we just mandate that the user must have kernel 5021 * read privileges to use this routine. 5022 */ 5023 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 5024 *flags |= CPU_DTRACE_KPRIV; 5025 *illval = daddr; 5026 regs[rd] = 0; 5027 } 5028 5029 if (!DTRACE_INSCRATCH(mstate, size)) { 5030 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5031 regs[rd] = 0; 5032 break; 5033 } 5034 5035 *end = '\0'; 5036 5037 /* 5038 * We want to have a name for the minor. In order to do this, 5039 * we need to walk the minor list from the devinfo. We want 5040 * to be sure that we don't infinitely walk a circular list, 5041 * so we check for circularity by sending a scout pointer 5042 * ahead two elements for every element that we iterate over; 5043 * if the list is circular, these will ultimately point to the 5044 * same element. 
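 * (This is the classic tortoise-and-hare cycle detection.)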
You may recognize this little trick as the 5045 * answer to a stupid interview question -- one that always 5046 * seems to be asked by those who had to have it laboriously 5047 * explained to them, and who can't even concisely describe 5048 * the conditions under which one would be forced to resort to 5049 * this technique. Needless to say, those conditions are 5050 * found here -- and probably only here. Is this the only use 5051 * of this infamous trick in shipping, production code? If it 5052 * isn't, it probably should be... 5053 */ 5054 if (minor != -1) { 5055 uintptr_t maddr = dtrace_loadptr(daddr + 5056 offsetof(struct dev_info, devi_minor)); 5057 5058 uintptr_t next = offsetof(struct ddi_minor_data, next); 5059 uintptr_t name = offsetof(struct ddi_minor_data, 5060 d_minor) + offsetof(struct ddi_minor, name); 5061 uintptr_t dev = offsetof(struct ddi_minor_data, 5062 d_minor) + offsetof(struct ddi_minor, dev); 5063 uintptr_t scout; 5064 5065 if (maddr != NULL) 5066 scout = dtrace_loadptr(maddr + next); 5067 5068 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5069 uint64_t m; 5070 #ifdef _LP64 5071 m = dtrace_load64(maddr + dev) & MAXMIN64; 5072 #else 5073 m = dtrace_load32(maddr + dev) & MAXMIN; 5074 #endif 5075 if (m != minor) { 5076 maddr = dtrace_loadptr(maddr + next); 5077 5078 if (scout == NULL) 5079 continue; 5080 5081 scout = dtrace_loadptr(scout + next); 5082 5083 if (scout == NULL) 5084 continue; 5085 5086 scout = dtrace_loadptr(scout + next); 5087 5088 if (scout == NULL) 5089 continue; 5090 5091 if (scout == maddr) { 5092 *flags |= CPU_DTRACE_ILLOP; 5093 break; 5094 } 5095 5096 continue; 5097 } 5098 5099 /* 5100 * We have the minor data. Now we need to 5101 * copy the minor's name into the end of the 5102 * pathname. 5103 */ 5104 s = (char *)dtrace_loadptr(maddr + name); 5105 len = dtrace_strlen(s, size); 5106 5107 if (*flags & CPU_DTRACE_FAULT) 5108 break; 5109 5110 if (len != 0) { 5111 if ((end -= (len + 1)) < start) 5112 break; 5113 5114 *end = ':'; 5115 } 5116 5117 for (i = 1; i <= len; i++) 5118 end[i] = dtrace_load8((uintptr_t)s++); 5119 break; 5120 } 5121 } 5122 5123 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5124 ddi_node_state_t devi_state; 5125 5126 devi_state = dtrace_load32(daddr + 5127 offsetof(struct dev_info, devi_node_state)); 5128 5129 if (*flags & CPU_DTRACE_FAULT) 5130 break; 5131 5132 if (devi_state >= DS_INITIALIZED) { 5133 s = (char *)dtrace_loadptr(daddr + 5134 offsetof(struct dev_info, devi_addr)); 5135 len = dtrace_strlen(s, size); 5136 5137 if (*flags & CPU_DTRACE_FAULT) 5138 break; 5139 5140 if (len != 0) { 5141 if ((end -= (len + 1)) < start) 5142 break; 5143 5144 *end = '@'; 5145 } 5146 5147 for (i = 1; i <= len; i++) 5148 end[i] = dtrace_load8((uintptr_t)s++); 5149 } 5150 5151 /* 5152 * Now for the node name... 5153 */ 5154 s = (char *)dtrace_loadptr(daddr + 5155 offsetof(struct dev_info, devi_node_name)); 5156 5157 daddr = dtrace_loadptr(daddr + 5158 offsetof(struct dev_info, devi_parent)); 5159 5160 /* 5161 * If our parent is NULL (that is, if we're the root 5162 * node), we're going to use the special path 5163 * "devices". 
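 * The finished string thus takes the form
 * /devices/<node>@<addr>/.../<node>@<addr>:<minor>, with the minor
 * suffix present only when a minor name was found above.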
5164 */ 5165 if (daddr == 0) 5166 s = "devices"; 5167 5168 len = dtrace_strlen(s, size); 5169 if (*flags & CPU_DTRACE_FAULT) 5170 break; 5171 5172 if ((end -= (len + 1)) < start) 5173 break; 5174 5175 for (i = 1; i <= len; i++) 5176 end[i] = dtrace_load8((uintptr_t)s++); 5177 *end = '/'; 5178 5179 if (depth++ > dtrace_devdepth_max) { 5180 *flags |= CPU_DTRACE_ILLOP; 5181 break; 5182 } 5183 } 5184 5185 if (end < start) 5186 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5187 5188 if (daddr == 0) { 5189 regs[rd] = (uintptr_t)end; 5190 mstate->dtms_scratch_ptr += size; 5191 } 5192 5193 break; 5194 } 5195 #endif 5196 5197 case DIF_SUBR_STRJOIN: { 5198 char *d = (char *)mstate->dtms_scratch_ptr; 5199 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5200 uintptr_t s1 = tupregs[0].dttk_value; 5201 uintptr_t s2 = tupregs[1].dttk_value; 5202 int i = 0; 5203 5204 if (!dtrace_strcanload(s1, size, mstate, vstate) || 5205 !dtrace_strcanload(s2, size, mstate, vstate)) { 5206 regs[rd] = 0; 5207 break; 5208 } 5209 5210 if (!DTRACE_INSCRATCH(mstate, size)) { 5211 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5212 regs[rd] = 0; 5213 break; 5214 } 5215 5216 for (;;) { 5217 if (i >= size) { 5218 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5219 regs[rd] = 0; 5220 break; 5221 } 5222 5223 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 5224 i--; 5225 break; 5226 } 5227 } 5228 5229 for (;;) { 5230 if (i >= size) { 5231 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5232 regs[rd] = 0; 5233 break; 5234 } 5235 5236 if ((d[i++] = dtrace_load8(s2++)) == '\0') 5237 break; 5238 } 5239 5240 if (i < size) { 5241 mstate->dtms_scratch_ptr += i; 5242 regs[rd] = (uintptr_t)d; 5243 } 5244 5245 break; 5246 } 5247 5248 case DIF_SUBR_STRTOLL: { 5249 uintptr_t s = tupregs[0].dttk_value; 5250 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5251 int base = 10; 5252 5253 if (nargs > 1) { 5254 if ((base = tupregs[1].dttk_value) <= 1 || 5255 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5256 *flags |= CPU_DTRACE_ILLOP; 5257 break; 5258 } 5259 } 5260 5261 if (!dtrace_strcanload(s, size, mstate, vstate)) { 5262 regs[rd] = INT64_MIN; 5263 break; 5264 } 5265 5266 regs[rd] = dtrace_strtoll((char *)s, base, size); 5267 break; 5268 } 5269 5270 case DIF_SUBR_LLTOSTR: { 5271 int64_t i = (int64_t)tupregs[0].dttk_value; 5272 uint64_t val, digit; 5273 uint64_t size = 65; /* enough room for 2^64 in binary */ 5274 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 5275 int base = 10; 5276 5277 if (nargs > 1) { 5278 if ((base = tupregs[1].dttk_value) <= 1 || 5279 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5280 *flags |= CPU_DTRACE_ILLOP; 5281 break; 5282 } 5283 } 5284 5285 val = (base == 10 && i < 0) ? 
i * -1 : i; 5286 5287 if (!DTRACE_INSCRATCH(mstate, size)) { 5288 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5289 regs[rd] = 0; 5290 break; 5291 } 5292 5293 for (*end-- = '\0'; val; val /= base) { 5294 if ((digit = val % base) <= '9' - '0') { 5295 *end-- = '0' + digit; 5296 } else { 5297 *end-- = 'a' + (digit - ('9' - '0') - 1); 5298 } 5299 } 5300 5301 if (i == 0 && base == 16) 5302 *end-- = '0'; 5303 5304 if (base == 16) 5305 *end-- = 'x'; 5306 5307 if (i == 0 || base == 8 || base == 16) 5308 *end-- = '0'; 5309 5310 if (i < 0 && base == 10) 5311 *end-- = '-'; 5312 5313 regs[rd] = (uintptr_t)end + 1; 5314 mstate->dtms_scratch_ptr += size; 5315 break; 5316 } 5317 5318 case DIF_SUBR_HTONS: 5319 case DIF_SUBR_NTOHS: 5320 #if BYTE_ORDER == BIG_ENDIAN 5321 regs[rd] = (uint16_t)tupregs[0].dttk_value; 5322 #else 5323 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 5324 #endif 5325 break; 5326 5327 5328 case DIF_SUBR_HTONL: 5329 case DIF_SUBR_NTOHL: 5330 #if BYTE_ORDER == BIG_ENDIAN 5331 regs[rd] = (uint32_t)tupregs[0].dttk_value; 5332 #else 5333 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 5334 #endif 5335 break; 5336 5337 5338 case DIF_SUBR_HTONLL: 5339 case DIF_SUBR_NTOHLL: 5340 #if BYTE_ORDER == BIG_ENDIAN 5341 regs[rd] = (uint64_t)tupregs[0].dttk_value; 5342 #else 5343 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 5344 #endif 5345 break; 5346 5347 5348 case DIF_SUBR_DIRNAME: 5349 case DIF_SUBR_BASENAME: { 5350 char *dest = (char *)mstate->dtms_scratch_ptr; 5351 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5352 uintptr_t src = tupregs[0].dttk_value; 5353 int i, j, len = dtrace_strlen((char *)src, size); 5354 int lastbase = -1, firstbase = -1, lastdir = -1; 5355 int start, end; 5356 5357 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 5358 regs[rd] = 0; 5359 break; 5360 } 5361 5362 if (!DTRACE_INSCRATCH(mstate, size)) { 5363 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5364 regs[rd] = 0; 5365 break; 5366 } 5367 5368 /* 5369 * The basename and dirname for a zero-length string are 5370 * defined to be "." 5371 */ 5372 if (len == 0) { 5373 len = 1; 5374 src = (uintptr_t)"."; 5375 } 5376 5377 /* 5378 * Start from the back of the string, moving back toward the 5379 * front until we see a character that isn't a slash. That 5380 * character is the last character in the basename. 5381 */ 5382 for (i = len - 1; i >= 0; i--) { 5383 if (dtrace_load8(src + i) != '/') 5384 break; 5385 } 5386 5387 if (i >= 0) 5388 lastbase = i; 5389 5390 /* 5391 * Starting from the last character in the basename, move 5392 * towards the front until we find a slash. The character 5393 * that we processed immediately before that is the first 5394 * character in the basename. 5395 */ 5396 for (; i >= 0; i--) { 5397 if (dtrace_load8(src + i) == '/') 5398 break; 5399 } 5400 5401 if (i >= 0) 5402 firstbase = i + 1; 5403 5404 /* 5405 * Now keep going until we find a non-slash character. That 5406 * character is the last character in the dirname. 5407 */ 5408 for (; i >= 0; i--) { 5409 if (dtrace_load8(src + i) != '/') 5410 break; 5411 } 5412 5413 if (i >= 0) 5414 lastdir = i; 5415 5416 ASSERT(!(lastbase == -1 && firstbase != -1)); 5417 ASSERT(!(firstbase == -1 && lastdir != -1)); 5418 5419 if (lastbase == -1) { 5420 /* 5421 * We didn't find a non-slash character. We know that 5422 * the length is non-zero, so the whole string must be 5423 * slashes. In either the dirname or the basename 5424 * case, we return '/'.
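 * (For example, both basename("///") and dirname("///") evaluate to
 * "/".)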
5425 */ 5426 ASSERT(firstbase == -1); 5427 firstbase = lastbase = lastdir = 0; 5428 } 5429 5430 if (firstbase == -1) { 5431 /* 5432 * The entire string consists only of a basename 5433 * component. If we're looking for dirname, we need 5434 * to change our string to be just "."; if we're 5435 * looking for a basename, we'll just set the first 5436 * character of the basename to be 0. 5437 */ 5438 if (subr == DIF_SUBR_DIRNAME) { 5439 ASSERT(lastdir == -1); 5440 src = (uintptr_t)"."; 5441 lastdir = 0; 5442 } else { 5443 firstbase = 0; 5444 } 5445 } 5446 5447 if (subr == DIF_SUBR_DIRNAME) { 5448 if (lastdir == -1) { 5449 /* 5450 * We know that we have a slash in the name -- 5451 * or lastdir would be set to 0, above. And 5452 * because lastdir is -1, we know that this 5453 * slash must be the first character. (That 5454 * is, the full string must be of the form 5455 * "/basename".) In this case, the last 5456 * character of the directory name is 0. 5457 */ 5458 lastdir = 0; 5459 } 5460 5461 start = 0; 5462 end = lastdir; 5463 } else { 5464 ASSERT(subr == DIF_SUBR_BASENAME); 5465 ASSERT(firstbase != -1 && lastbase != -1); 5466 start = firstbase; 5467 end = lastbase; 5468 } 5469 5470 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5471 dest[j] = dtrace_load8(src + i); 5472 5473 dest[j] = '\0'; 5474 regs[rd] = (uintptr_t)dest; 5475 mstate->dtms_scratch_ptr += size; 5476 break; 5477 } 5478 5479 case DIF_SUBR_GETF: { 5480 uintptr_t fd = tupregs[0].dttk_value; 5481 struct filedesc *fdp; 5482 file_t *fp; 5483 5484 if (!dtrace_priv_proc(state)) { 5485 regs[rd] = 0; 5486 break; 5487 } 5488 fdp = curproc->p_fd; 5489 FILEDESC_SLOCK(fdp); 5490 fp = fget_locked(fdp, fd); 5491 mstate->dtms_getf = fp; 5492 regs[rd] = (uintptr_t)fp; 5493 FILEDESC_SUNLOCK(fdp); 5494 break; 5495 } 5496 5497 case DIF_SUBR_CLEANPATH: { 5498 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5499 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5500 uintptr_t src = tupregs[0].dttk_value; 5501 int i = 0, j = 0; 5502 #ifdef illumos 5503 zone_t *z; 5504 #endif 5505 5506 if (!dtrace_strcanload(src, size, mstate, vstate)) { 5507 regs[rd] = 0; 5508 break; 5509 } 5510 5511 if (!DTRACE_INSCRATCH(mstate, size)) { 5512 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5513 regs[rd] = 0; 5514 break; 5515 } 5516 5517 /* 5518 * Move forward, loading each character. 5519 */ 5520 do { 5521 c = dtrace_load8(src + i++); 5522 next: 5523 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5524 break; 5525 5526 if (c != '/') { 5527 dest[j++] = c; 5528 continue; 5529 } 5530 5531 c = dtrace_load8(src + i++); 5532 5533 if (c == '/') { 5534 /* 5535 * We have two slashes -- we can just advance 5536 * to the next character. 5537 */ 5538 goto next; 5539 } 5540 5541 if (c != '.') { 5542 /* 5543 * This is not "." and it's not ".." -- we can 5544 * just store the "/" and this character and 5545 * drive on. 5546 */ 5547 dest[j++] = '/'; 5548 dest[j++] = c; 5549 continue; 5550 } 5551 5552 c = dtrace_load8(src + i++); 5553 5554 if (c == '/') { 5555 /* 5556 * This is a "/./" component. We're not going 5557 * to store anything in the destination buffer; 5558 * we're just going to go to the next component. 5559 */ 5560 goto next; 5561 } 5562 5563 if (c != '.') { 5564 /* 5565 * This is not ".." -- we can just store the 5566 * "/." and this character and continue 5567 * processing. 
5568 */ 5569 dest[j++] = '/'; 5570 dest[j++] = '.'; 5571 dest[j++] = c; 5572 continue; 5573 } 5574 5575 c = dtrace_load8(src + i++); 5576 5577 if (c != '/' && c != '\0') { 5578 /* 5579 * This is not ".." -- it's "..[mumble]". 5580 * We'll store the "/.." and this character 5581 * and continue processing. 5582 */ 5583 dest[j++] = '/'; 5584 dest[j++] = '.'; 5585 dest[j++] = '.'; 5586 dest[j++] = c; 5587 continue; 5588 } 5589 5590 /* 5591 * This is "/../" or "/..\0". We need to back up 5592 * our destination pointer until we find a "/". 5593 */ 5594 i--; 5595 while (j != 0 && dest[--j] != '/') 5596 continue; 5597 5598 if (c == '\0') 5599 dest[++j] = '/'; 5600 } while (c != '\0'); 5601 5602 dest[j] = '\0'; 5603 5604 #ifdef illumos 5605 if (mstate->dtms_getf != NULL && 5606 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5607 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5608 /* 5609 * If we've done a getf() as a part of this ECB and we 5610 * don't have kernel access (and we're not in the global 5611 * zone), check if the path we cleaned up begins with 5612 * the zone's root path, and trim it off if so. Note 5613 * that this is an output cleanliness issue, not a 5614 * security issue: knowing one's zone root path does 5615 * not enable privilege escalation. 5616 */ 5617 if (strstr(dest, z->zone_rootpath) == dest) 5618 dest += strlen(z->zone_rootpath) - 1; 5619 } 5620 #endif 5621 5622 regs[rd] = (uintptr_t)dest; 5623 mstate->dtms_scratch_ptr += size; 5624 break; 5625 } 5626 5627 case DIF_SUBR_INET_NTOA: 5628 case DIF_SUBR_INET_NTOA6: 5629 case DIF_SUBR_INET_NTOP: { 5630 size_t size; 5631 int af, argi, i; 5632 char *base, *end; 5633 5634 if (subr == DIF_SUBR_INET_NTOP) { 5635 af = (int)tupregs[0].dttk_value; 5636 argi = 1; 5637 } else { 5638 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5639 argi = 0; 5640 } 5641 5642 if (af == AF_INET) { 5643 ipaddr_t ip4; 5644 uint8_t *ptr8, val; 5645 5646 if (!dtrace_canload(tupregs[argi].dttk_value, 5647 sizeof (ipaddr_t), mstate, vstate)) { 5648 regs[rd] = 0; 5649 break; 5650 } 5651 5652 /* 5653 * Safely load the IPv4 address. 5654 */ 5655 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5656 5657 /* 5658 * Check an IPv4 string will fit in scratch. 5659 */ 5660 size = INET_ADDRSTRLEN; 5661 if (!DTRACE_INSCRATCH(mstate, size)) { 5662 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5663 regs[rd] = 0; 5664 break; 5665 } 5666 base = (char *)mstate->dtms_scratch_ptr; 5667 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5668 5669 /* 5670 * Stringify as a dotted decimal quad. 5671 */ 5672 *end-- = '\0'; 5673 ptr8 = (uint8_t *)&ip4; 5674 for (i = 3; i >= 0; i--) { 5675 val = ptr8[i]; 5676 5677 if (val == 0) { 5678 *end-- = '0'; 5679 } else { 5680 for (; val; val /= 10) { 5681 *end-- = '0' + (val % 10); 5682 } 5683 } 5684 5685 if (i > 0) 5686 *end-- = '.'; 5687 } 5688 ASSERT(end + 1 >= base); 5689 5690 } else if (af == AF_INET6) { 5691 struct in6_addr ip6; 5692 int firstzero, tryzero, numzero, v6end; 5693 uint16_t val; 5694 const char digits[] = "0123456789abcdef"; 5695 5696 /* 5697 * Stringify using RFC 1884 convention 2 - 16 bit 5698 * hexadecimal values with a zero-run compression. 5699 * Lower case hexadecimal digits are used. 5700 * eg, fe80::214:4fff:fe0b:76c8. 5701 * The IPv4 embedded form is returned for inet_ntop, 5702 * just the IPv4 string is returned for inet_ntoa6. 
5703 */ 5704 5705 if (!dtrace_canload(tupregs[argi].dttk_value, 5706 sizeof (struct in6_addr), mstate, vstate)) { 5707 regs[rd] = 0; 5708 break; 5709 } 5710 5711 /* 5712 * Safely load the IPv6 address. 5713 */ 5714 dtrace_bcopy( 5715 (void *)(uintptr_t)tupregs[argi].dttk_value, 5716 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5717 5718 /* 5719 * Check an IPv6 string will fit in scratch. 5720 */ 5721 size = INET6_ADDRSTRLEN; 5722 if (!DTRACE_INSCRATCH(mstate, size)) { 5723 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5724 regs[rd] = 0; 5725 break; 5726 } 5727 base = (char *)mstate->dtms_scratch_ptr; 5728 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5729 *end-- = '\0'; 5730 5731 /* 5732 * Find the longest run of 16 bit zero values 5733 * for the single allowed zero compression - "::". 5734 */ 5735 firstzero = -1; 5736 tryzero = -1; 5737 numzero = 1; 5738 for (i = 0; i < sizeof (struct in6_addr); i++) { 5739 #ifdef illumos 5740 if (ip6._S6_un._S6_u8[i] == 0 && 5741 #else 5742 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5743 #endif 5744 tryzero == -1 && i % 2 == 0) { 5745 tryzero = i; 5746 continue; 5747 } 5748 5749 if (tryzero != -1 && 5750 #ifdef illumos 5751 (ip6._S6_un._S6_u8[i] != 0 || 5752 #else 5753 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5754 #endif 5755 i == sizeof (struct in6_addr) - 1)) { 5756 5757 if (i - tryzero <= numzero) { 5758 tryzero = -1; 5759 continue; 5760 } 5761 5762 firstzero = tryzero; 5763 numzero = i - i % 2 - tryzero; 5764 tryzero = -1; 5765 5766 #ifdef illumos 5767 if (ip6._S6_un._S6_u8[i] == 0 && 5768 #else 5769 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5770 #endif 5771 i == sizeof (struct in6_addr) - 1) 5772 numzero += 2; 5773 } 5774 } 5775 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5776 5777 /* 5778 * Check for an IPv4 embedded address. 5779 */ 5780 v6end = sizeof (struct in6_addr) - 2; 5781 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5782 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5783 for (i = sizeof (struct in6_addr) - 1; 5784 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5785 ASSERT(end >= base); 5786 5787 #ifdef illumos 5788 val = ip6._S6_un._S6_u8[i]; 5789 #else 5790 val = ip6.__u6_addr.__u6_addr8[i]; 5791 #endif 5792 5793 if (val == 0) { 5794 *end-- = '0'; 5795 } else { 5796 for (; val; val /= 10) { 5797 *end-- = '0' + val % 10; 5798 } 5799 } 5800 5801 if (i > DTRACE_V4MAPPED_OFFSET) 5802 *end-- = '.'; 5803 } 5804 5805 if (subr == DIF_SUBR_INET_NTOA6) 5806 goto inetout; 5807 5808 /* 5809 * Set v6end to skip the IPv4 address that 5810 * we have already stringified. 5811 */ 5812 v6end = 10; 5813 } 5814 5815 /* 5816 * Build the IPv6 string by working through the 5817 * address in reverse. 5818 */ 5819 for (i = v6end; i >= 0; i -= 2) { 5820 ASSERT(end >= base); 5821 5822 if (i == firstzero + numzero - 2) { 5823 *end-- = ':'; 5824 *end-- = ':'; 5825 i -= numzero - 2; 5826 continue; 5827 } 5828 5829 if (i < 14 && i != firstzero - 2) 5830 *end-- = ':'; 5831 5832 #ifdef illumos 5833 val = (ip6._S6_un._S6_u8[i] << 8) + 5834 ip6._S6_un._S6_u8[i + 1]; 5835 #else 5836 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 5837 ip6.__u6_addr.__u6_addr8[i + 1]; 5838 #endif 5839 5840 if (val == 0) { 5841 *end-- = '0'; 5842 } else { 5843 for (; val; val /= 16) { 5844 *end-- = digits[val % 16]; 5845 } 5846 } 5847 } 5848 ASSERT(end + 1 >= base); 5849 5850 } else { 5851 /* 5852 * The user didn't use AH_INET or AH_INET6. 
5853 */ 5854 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5855 regs[rd] = 0; 5856 break; 5857 } 5858 5859 inetout: regs[rd] = (uintptr_t)end + 1; 5860 mstate->dtms_scratch_ptr += size; 5861 break; 5862 } 5863 5864 case DIF_SUBR_MEMREF: { 5865 uintptr_t size = 2 * sizeof(uintptr_t); 5866 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5867 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 5868 5869 /* address and length */ 5870 memref[0] = tupregs[0].dttk_value; 5871 memref[1] = tupregs[1].dttk_value; 5872 5873 regs[rd] = (uintptr_t) memref; 5874 mstate->dtms_scratch_ptr += scratch_size; 5875 break; 5876 } 5877 5878 #ifndef illumos 5879 case DIF_SUBR_MEMSTR: { 5880 char *str = (char *)mstate->dtms_scratch_ptr; 5881 uintptr_t mem = tupregs[0].dttk_value; 5882 char c = tupregs[1].dttk_value; 5883 size_t size = tupregs[2].dttk_value; 5884 uint8_t n; 5885 int i; 5886 5887 regs[rd] = 0; 5888 5889 if (size == 0) 5890 break; 5891 5892 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 5893 break; 5894 5895 if (!DTRACE_INSCRATCH(mstate, size)) { 5896 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5897 break; 5898 } 5899 5900 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 5901 *flags |= CPU_DTRACE_ILLOP; 5902 break; 5903 } 5904 5905 for (i = 0; i < size - 1; i++) { 5906 n = dtrace_load8(mem++); 5907 str[i] = (n == 0) ? c : n; 5908 } 5909 str[size - 1] = 0; 5910 5911 regs[rd] = (uintptr_t)str; 5912 mstate->dtms_scratch_ptr += size; 5913 break; 5914 } 5915 #endif 5916 5917 case DIF_SUBR_TYPEREF: { 5918 uintptr_t size = 4 * sizeof(uintptr_t); 5919 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5920 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 5921 5922 /* address, num_elements, type_str, type_len */ 5923 typeref[0] = tupregs[0].dttk_value; 5924 typeref[1] = tupregs[1].dttk_value; 5925 typeref[2] = tupregs[2].dttk_value; 5926 typeref[3] = tupregs[3].dttk_value; 5927 5928 regs[rd] = (uintptr_t) typeref; 5929 mstate->dtms_scratch_ptr += scratch_size; 5930 break; 5931 } 5932 } 5933 } 5934 5935 /* 5936 * Emulate the execution of DTrace IR instructions specified by the given 5937 * DIF object. This function is deliberately void of assertions as all of 5938 * the necessary checks are handled by a call to dtrace_difo_validate(). 5939 */ 5940 static uint64_t 5941 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 5942 dtrace_vstate_t *vstate, dtrace_state_t *state) 5943 { 5944 const dif_instr_t *text = difo->dtdo_buf; 5945 const uint_t textlen = difo->dtdo_len; 5946 const char *strtab = difo->dtdo_strtab; 5947 const uint64_t *inttab = difo->dtdo_inttab; 5948 5949 uint64_t rval = 0; 5950 dtrace_statvar_t *svar; 5951 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 5952 dtrace_difv_t *v; 5953 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5954 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 5955 5956 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 5957 uint64_t regs[DIF_DIR_NREGS]; 5958 uint64_t *tmp; 5959 5960 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 5961 int64_t cc_r; 5962 uint_t pc = 0, id, opc = 0; 5963 uint8_t ttop = 0; 5964 dif_instr_t instr; 5965 uint_t r1, r2, rd; 5966 5967 /* 5968 * We stash the current DIF object into the machine state: we need it 5969 * for subsequent access checking. 
5970 */ 5971 mstate->dtms_difo = difo; 5972 5973 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 5974 5975 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 5976 opc = pc; 5977 5978 instr = text[pc++]; 5979 r1 = DIF_INSTR_R1(instr); 5980 r2 = DIF_INSTR_R2(instr); 5981 rd = DIF_INSTR_RD(instr); 5982 5983 switch (DIF_INSTR_OP(instr)) { 5984 case DIF_OP_OR: 5985 regs[rd] = regs[r1] | regs[r2]; 5986 break; 5987 case DIF_OP_XOR: 5988 regs[rd] = regs[r1] ^ regs[r2]; 5989 break; 5990 case DIF_OP_AND: 5991 regs[rd] = regs[r1] & regs[r2]; 5992 break; 5993 case DIF_OP_SLL: 5994 regs[rd] = regs[r1] << regs[r2]; 5995 break; 5996 case DIF_OP_SRL: 5997 regs[rd] = regs[r1] >> regs[r2]; 5998 break; 5999 case DIF_OP_SUB: 6000 regs[rd] = regs[r1] - regs[r2]; 6001 break; 6002 case DIF_OP_ADD: 6003 regs[rd] = regs[r1] + regs[r2]; 6004 break; 6005 case DIF_OP_MUL: 6006 regs[rd] = regs[r1] * regs[r2]; 6007 break; 6008 case DIF_OP_SDIV: 6009 if (regs[r2] == 0) { 6010 regs[rd] = 0; 6011 *flags |= CPU_DTRACE_DIVZERO; 6012 } else { 6013 regs[rd] = (int64_t)regs[r1] / 6014 (int64_t)regs[r2]; 6015 } 6016 break; 6017 6018 case DIF_OP_UDIV: 6019 if (regs[r2] == 0) { 6020 regs[rd] = 0; 6021 *flags |= CPU_DTRACE_DIVZERO; 6022 } else { 6023 regs[rd] = regs[r1] / regs[r2]; 6024 } 6025 break; 6026 6027 case DIF_OP_SREM: 6028 if (regs[r2] == 0) { 6029 regs[rd] = 0; 6030 *flags |= CPU_DTRACE_DIVZERO; 6031 } else { 6032 regs[rd] = (int64_t)regs[r1] % 6033 (int64_t)regs[r2]; 6034 } 6035 break; 6036 6037 case DIF_OP_UREM: 6038 if (regs[r2] == 0) { 6039 regs[rd] = 0; 6040 *flags |= CPU_DTRACE_DIVZERO; 6041 } else { 6042 regs[rd] = regs[r1] % regs[r2]; 6043 } 6044 break; 6045 6046 case DIF_OP_NOT: 6047 regs[rd] = ~regs[r1]; 6048 break; 6049 case DIF_OP_MOV: 6050 regs[rd] = regs[r1]; 6051 break; 6052 case DIF_OP_CMP: 6053 cc_r = regs[r1] - regs[r2]; 6054 cc_n = cc_r < 0; 6055 cc_z = cc_r == 0; 6056 cc_v = 0; 6057 cc_c = regs[r1] < regs[r2]; 6058 break; 6059 case DIF_OP_TST: 6060 cc_n = cc_v = cc_c = 0; 6061 cc_z = regs[r1] == 0; 6062 break; 6063 case DIF_OP_BA: 6064 pc = DIF_INSTR_LABEL(instr); 6065 break; 6066 case DIF_OP_BE: 6067 if (cc_z) 6068 pc = DIF_INSTR_LABEL(instr); 6069 break; 6070 case DIF_OP_BNE: 6071 if (cc_z == 0) 6072 pc = DIF_INSTR_LABEL(instr); 6073 break; 6074 case DIF_OP_BG: 6075 if ((cc_z | (cc_n ^ cc_v)) == 0) 6076 pc = DIF_INSTR_LABEL(instr); 6077 break; 6078 case DIF_OP_BGU: 6079 if ((cc_c | cc_z) == 0) 6080 pc = DIF_INSTR_LABEL(instr); 6081 break; 6082 case DIF_OP_BGE: 6083 if ((cc_n ^ cc_v) == 0) 6084 pc = DIF_INSTR_LABEL(instr); 6085 break; 6086 case DIF_OP_BGEU: 6087 if (cc_c == 0) 6088 pc = DIF_INSTR_LABEL(instr); 6089 break; 6090 case DIF_OP_BL: 6091 if (cc_n ^ cc_v) 6092 pc = DIF_INSTR_LABEL(instr); 6093 break; 6094 case DIF_OP_BLU: 6095 if (cc_c) 6096 pc = DIF_INSTR_LABEL(instr); 6097 break; 6098 case DIF_OP_BLE: 6099 if (cc_z | (cc_n ^ cc_v)) 6100 pc = DIF_INSTR_LABEL(instr); 6101 break; 6102 case DIF_OP_BLEU: 6103 if (cc_c | cc_z) 6104 pc = DIF_INSTR_LABEL(instr); 6105 break; 6106 case DIF_OP_RLDSB: 6107 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6108 break; 6109 /*FALLTHROUGH*/ 6110 case DIF_OP_LDSB: 6111 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6112 break; 6113 case DIF_OP_RLDSH: 6114 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6115 break; 6116 /*FALLTHROUGH*/ 6117 case DIF_OP_LDSH: 6118 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6119 break; 6120 case DIF_OP_RLDSW: 6121 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6122 break; 6123 /*FALLTHROUGH*/ 6124 case DIF_OP_LDSW: 
6125 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6126 break; 6127 case DIF_OP_RLDUB: 6128 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6129 break; 6130 /*FALLTHROUGH*/ 6131 case DIF_OP_LDUB: 6132 regs[rd] = dtrace_load8(regs[r1]); 6133 break; 6134 case DIF_OP_RLDUH: 6135 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6136 break; 6137 /*FALLTHROUGH*/ 6138 case DIF_OP_LDUH: 6139 regs[rd] = dtrace_load16(regs[r1]); 6140 break; 6141 case DIF_OP_RLDUW: 6142 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6143 break; 6144 /*FALLTHROUGH*/ 6145 case DIF_OP_LDUW: 6146 regs[rd] = dtrace_load32(regs[r1]); 6147 break; 6148 case DIF_OP_RLDX: 6149 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6150 break; 6151 /*FALLTHROUGH*/ 6152 case DIF_OP_LDX: 6153 regs[rd] = dtrace_load64(regs[r1]); 6154 break; 6155 case DIF_OP_ULDSB: 6156 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6157 regs[rd] = (int8_t) 6158 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6159 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6160 break; 6161 case DIF_OP_ULDSH: 6162 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6163 regs[rd] = (int16_t) 6164 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6165 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6166 break; 6167 case DIF_OP_ULDSW: 6168 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6169 regs[rd] = (int32_t) 6170 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6171 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6172 break; 6173 case DIF_OP_ULDUB: 6174 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6175 regs[rd] = 6176 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6177 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6178 break; 6179 case DIF_OP_ULDUH: 6180 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6181 regs[rd] = 6182 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6183 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6184 break; 6185 case DIF_OP_ULDUW: 6186 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6187 regs[rd] = 6188 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6189 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6190 break; 6191 case DIF_OP_ULDX: 6192 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6193 regs[rd] = 6194 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6195 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6196 break; 6197 case DIF_OP_RET: 6198 rval = regs[rd]; 6199 pc = textlen; 6200 break; 6201 case DIF_OP_NOP: 6202 break; 6203 case DIF_OP_SETX: 6204 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6205 break; 6206 case DIF_OP_SETS: 6207 regs[rd] = (uint64_t)(uintptr_t) 6208 (strtab + DIF_INSTR_STRING(instr)); 6209 break; 6210 case DIF_OP_SCMP: { 6211 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6212 uintptr_t s1 = regs[r1]; 6213 uintptr_t s2 = regs[r2]; 6214 6215 if (s1 != 0 && 6216 !dtrace_strcanload(s1, sz, mstate, vstate)) 6217 break; 6218 if (s2 != 0 && 6219 !dtrace_strcanload(s2, sz, mstate, vstate)) 6220 break; 6221 6222 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 6223 6224 cc_n = cc_r < 0; 6225 cc_z = cc_r == 0; 6226 cc_v = cc_c = 0; 6227 break; 6228 } 6229 case DIF_OP_LDGA: 6230 regs[rd] = dtrace_dif_variable(mstate, state, 6231 r1, regs[r2]); 6232 break; 6233 case DIF_OP_LDGS: 6234 id = DIF_INSTR_VAR(instr); 6235 6236 if (id >= DIF_VAR_OTHER_UBASE) { 6237 uintptr_t a; 6238 6239 id -= DIF_VAR_OTHER_UBASE; 6240 svar = vstate->dtvs_globals[id]; 6241 ASSERT(svar != NULL); 6242 v = &svar->dtsv_var; 6243 6244 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6245 regs[rd] = svar->dtsv_data; 6246 break; 6247 } 6248 6249 a = (uintptr_t)svar->dtsv_data; 6250 6251 if (*(uint8_t *)a == UINT8_MAX) { 6252 /* 6253 * If the 0th byte is set 
to UINT8_MAX 6254 * then this is to be treated as a 6255 * reference to a NULL variable. 6256 */ 6257 regs[rd] = 0; 6258 } else { 6259 regs[rd] = a + sizeof (uint64_t); 6260 } 6261 6262 break; 6263 } 6264 6265 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6266 break; 6267 6268 case DIF_OP_STGS: 6269 id = DIF_INSTR_VAR(instr); 6270 6271 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6272 id -= DIF_VAR_OTHER_UBASE; 6273 6274 VERIFY(id < vstate->dtvs_nglobals); 6275 svar = vstate->dtvs_globals[id]; 6276 ASSERT(svar != NULL); 6277 v = &svar->dtsv_var; 6278 6279 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6280 uintptr_t a = (uintptr_t)svar->dtsv_data; 6281 6282 ASSERT(a != 0); 6283 ASSERT(svar->dtsv_size != 0); 6284 6285 if (regs[rd] == 0) { 6286 *(uint8_t *)a = UINT8_MAX; 6287 break; 6288 } else { 6289 *(uint8_t *)a = 0; 6290 a += sizeof (uint64_t); 6291 } 6292 if (!dtrace_vcanload( 6293 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6294 mstate, vstate)) 6295 break; 6296 6297 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6298 (void *)a, &v->dtdv_type); 6299 break; 6300 } 6301 6302 svar->dtsv_data = regs[rd]; 6303 break; 6304 6305 case DIF_OP_LDTA: 6306 /* 6307 * There are no DTrace built-in thread-local arrays at 6308 * present. This opcode is saved for future work. 6309 */ 6310 *flags |= CPU_DTRACE_ILLOP; 6311 regs[rd] = 0; 6312 break; 6313 6314 case DIF_OP_LDLS: 6315 id = DIF_INSTR_VAR(instr); 6316 6317 if (id < DIF_VAR_OTHER_UBASE) { 6318 /* 6319 * For now, this has no meaning. 6320 */ 6321 regs[rd] = 0; 6322 break; 6323 } 6324 6325 id -= DIF_VAR_OTHER_UBASE; 6326 6327 ASSERT(id < vstate->dtvs_nlocals); 6328 ASSERT(vstate->dtvs_locals != NULL); 6329 6330 svar = vstate->dtvs_locals[id]; 6331 ASSERT(svar != NULL); 6332 v = &svar->dtsv_var; 6333 6334 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6335 uintptr_t a = (uintptr_t)svar->dtsv_data; 6336 size_t sz = v->dtdv_type.dtdt_size; 6337 6338 sz += sizeof (uint64_t); 6339 ASSERT(svar->dtsv_size == NCPU * sz); 6340 a += curcpu * sz; 6341 6342 if (*(uint8_t *)a == UINT8_MAX) { 6343 /* 6344 * If the 0th byte is set to UINT8_MAX 6345 * then this is to be treated as a 6346 * reference to a NULL variable. 
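 * (To make the encoding assumed here explicit: a by-ref static
 * variable's backing store is laid out, as a sketch, as
 *
 *	offset 0:                  NULL flag (UINT8_MAX => NULL)
 *	offset sizeof (uint64_t):  the data itself
 *
 * which is why a non-NULL load below yields a + sizeof (uint64_t)
 * rather than a itself.)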
6347 */ 6348 regs[rd] = 0; 6349 } else { 6350 regs[rd] = a + sizeof (uint64_t); 6351 } 6352 6353 break; 6354 } 6355 6356 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6357 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6358 regs[rd] = tmp[curcpu]; 6359 break; 6360 6361 case DIF_OP_STLS: 6362 id = DIF_INSTR_VAR(instr); 6363 6364 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6365 id -= DIF_VAR_OTHER_UBASE; 6366 VERIFY(id < vstate->dtvs_nlocals); 6367 6368 ASSERT(vstate->dtvs_locals != NULL); 6369 svar = vstate->dtvs_locals[id]; 6370 ASSERT(svar != NULL); 6371 v = &svar->dtsv_var; 6372 6373 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6374 uintptr_t a = (uintptr_t)svar->dtsv_data; 6375 size_t sz = v->dtdv_type.dtdt_size; 6376 6377 sz += sizeof (uint64_t); 6378 ASSERT(svar->dtsv_size == NCPU * sz); 6379 a += curcpu * sz; 6380 6381 if (regs[rd] == 0) { 6382 *(uint8_t *)a = UINT8_MAX; 6383 break; 6384 } else { 6385 *(uint8_t *)a = 0; 6386 a += sizeof (uint64_t); 6387 } 6388 6389 if (!dtrace_vcanload( 6390 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6391 mstate, vstate)) 6392 break; 6393 6394 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6395 (void *)a, &v->dtdv_type); 6396 break; 6397 } 6398 6399 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6400 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6401 tmp[curcpu] = regs[rd]; 6402 break; 6403 6404 case DIF_OP_LDTS: { 6405 dtrace_dynvar_t *dvar; 6406 dtrace_key_t *key; 6407 6408 id = DIF_INSTR_VAR(instr); 6409 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6410 id -= DIF_VAR_OTHER_UBASE; 6411 v = &vstate->dtvs_tlocals[id]; 6412 6413 key = &tupregs[DIF_DTR_NREGS]; 6414 key[0].dttk_value = (uint64_t)id; 6415 key[0].dttk_size = 0; 6416 DTRACE_TLS_THRKEY(key[1].dttk_value); 6417 key[1].dttk_size = 0; 6418 6419 dvar = dtrace_dynvar(dstate, 2, key, 6420 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6421 mstate, vstate); 6422 6423 if (dvar == NULL) { 6424 regs[rd] = 0; 6425 break; 6426 } 6427 6428 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6429 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6430 } else { 6431 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6432 } 6433 6434 break; 6435 } 6436 6437 case DIF_OP_STTS: { 6438 dtrace_dynvar_t *dvar; 6439 dtrace_key_t *key; 6440 6441 id = DIF_INSTR_VAR(instr); 6442 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6443 id -= DIF_VAR_OTHER_UBASE; 6444 VERIFY(id < vstate->dtvs_ntlocals); 6445 6446 key = &tupregs[DIF_DTR_NREGS]; 6447 key[0].dttk_value = (uint64_t)id; 6448 key[0].dttk_size = 0; 6449 DTRACE_TLS_THRKEY(key[1].dttk_value); 6450 key[1].dttk_size = 0; 6451 v = &vstate->dtvs_tlocals[id]; 6452 6453 dvar = dtrace_dynvar(dstate, 2, key, 6454 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6455 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6456 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6457 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6458 6459 /* 6460 * Given that we're storing to thread-local data, 6461 * we need to flush our predicate cache. 
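 * For example, a predicate such as /self->traced/ may have been
 * evaluated to false and cached against this thread near the top of
 * dtrace_probe(); now that we have stored a new value into a
 * thread-local variable, any such cached result may be stale and
 * must be invalidated.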
6462 */ 6463 curthread->t_predcache = 0; 6464 6465 if (dvar == NULL) 6466 break; 6467 6468 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6469 if (!dtrace_vcanload( 6470 (void *)(uintptr_t)regs[rd], 6471 &v->dtdv_type, mstate, vstate)) 6472 break; 6473 6474 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6475 dvar->dtdv_data, &v->dtdv_type); 6476 } else { 6477 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6478 } 6479 6480 break; 6481 } 6482 6483 case DIF_OP_SRA: 6484 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6485 break; 6486 6487 case DIF_OP_CALL: 6488 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6489 regs, tupregs, ttop, mstate, state); 6490 break; 6491 6492 case DIF_OP_PUSHTR: 6493 if (ttop == DIF_DTR_NREGS) { 6494 *flags |= CPU_DTRACE_TUPOFLOW; 6495 break; 6496 } 6497 6498 if (r1 == DIF_TYPE_STRING) { 6499 /* 6500 * If this is a string type and the size is 0, 6501 * we'll use the system-wide default string 6502 * size. Note that we are _not_ looking at 6503 * the value of the DTRACEOPT_STRSIZE option; 6504 * had this been set, we would expect to have 6505 * a non-zero size value in the "pushtr". 6506 */ 6507 tupregs[ttop].dttk_size = 6508 dtrace_strlen((char *)(uintptr_t)regs[rd], 6509 regs[r2] ? regs[r2] : 6510 dtrace_strsize_default) + 1; 6511 } else { 6512 if (regs[r2] > LONG_MAX) { 6513 *flags |= CPU_DTRACE_ILLOP; 6514 break; 6515 } 6516 6517 tupregs[ttop].dttk_size = regs[r2]; 6518 } 6519 6520 tupregs[ttop++].dttk_value = regs[rd]; 6521 break; 6522 6523 case DIF_OP_PUSHTV: 6524 if (ttop == DIF_DTR_NREGS) { 6525 *flags |= CPU_DTRACE_TUPOFLOW; 6526 break; 6527 } 6528 6529 tupregs[ttop].dttk_value = regs[rd]; 6530 tupregs[ttop++].dttk_size = 0; 6531 break; 6532 6533 case DIF_OP_POPTS: 6534 if (ttop != 0) 6535 ttop--; 6536 break; 6537 6538 case DIF_OP_FLUSHTS: 6539 ttop = 0; 6540 break; 6541 6542 case DIF_OP_LDGAA: 6543 case DIF_OP_LDTAA: { 6544 dtrace_dynvar_t *dvar; 6545 dtrace_key_t *key = tupregs; 6546 uint_t nkeys = ttop; 6547 6548 id = DIF_INSTR_VAR(instr); 6549 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6550 id -= DIF_VAR_OTHER_UBASE; 6551 6552 key[nkeys].dttk_value = (uint64_t)id; 6553 key[nkeys++].dttk_size = 0; 6554 6555 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6556 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6557 key[nkeys++].dttk_size = 0; 6558 VERIFY(id < vstate->dtvs_ntlocals); 6559 v = &vstate->dtvs_tlocals[id]; 6560 } else { 6561 VERIFY(id < vstate->dtvs_nglobals); 6562 v = &vstate->dtvs_globals[id]->dtsv_var; 6563 } 6564 6565 dvar = dtrace_dynvar(dstate, nkeys, key, 6566 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
6567 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6568 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6569 6570 if (dvar == NULL) { 6571 regs[rd] = 0; 6572 break; 6573 } 6574 6575 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6576 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6577 } else { 6578 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6579 } 6580 6581 break; 6582 } 6583 6584 case DIF_OP_STGAA: 6585 case DIF_OP_STTAA: { 6586 dtrace_dynvar_t *dvar; 6587 dtrace_key_t *key = tupregs; 6588 uint_t nkeys = ttop; 6589 6590 id = DIF_INSTR_VAR(instr); 6591 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6592 id -= DIF_VAR_OTHER_UBASE; 6593 6594 key[nkeys].dttk_value = (uint64_t)id; 6595 key[nkeys++].dttk_size = 0; 6596 6597 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6598 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6599 key[nkeys++].dttk_size = 0; 6600 VERIFY(id < vstate->dtvs_ntlocals); 6601 v = &vstate->dtvs_tlocals[id]; 6602 } else { 6603 VERIFY(id < vstate->dtvs_nglobals); 6604 v = &vstate->dtvs_globals[id]->dtsv_var; 6605 } 6606 6607 dvar = dtrace_dynvar(dstate, nkeys, key, 6608 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6609 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6610 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6611 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6612 6613 if (dvar == NULL) 6614 break; 6615 6616 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6617 if (!dtrace_vcanload( 6618 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6619 mstate, vstate)) 6620 break; 6621 6622 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6623 dvar->dtdv_data, &v->dtdv_type); 6624 } else { 6625 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6626 } 6627 6628 break; 6629 } 6630 6631 case DIF_OP_ALLOCS: { 6632 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6633 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6634 6635 /* 6636 * Rounding up the user allocation size could have 6637 * overflowed large, bogus allocations (like -1ULL) to 6638 * 0. 
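 * A worked example of the wrap being guarded against: with four
 * bytes of alignment padding and a bogus regs[r1] of -1ULL, size
 * computes to 4 + 0xffffffffffffffff == 3 (mod 2^64), which is
 * less than regs[r1] -- exactly the condition tested below.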
6639 */ 6640 if (size < regs[r1] || 6641 !DTRACE_INSCRATCH(mstate, size)) { 6642 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6643 regs[rd] = 0; 6644 break; 6645 } 6646 6647 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6648 mstate->dtms_scratch_ptr += size; 6649 regs[rd] = ptr; 6650 break; 6651 } 6652 6653 case DIF_OP_COPYS: 6654 if (!dtrace_canstore(regs[rd], regs[r2], 6655 mstate, vstate)) { 6656 *flags |= CPU_DTRACE_BADADDR; 6657 *illval = regs[rd]; 6658 break; 6659 } 6660 6661 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6662 break; 6663 6664 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6665 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6666 break; 6667 6668 case DIF_OP_STB: 6669 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6670 *flags |= CPU_DTRACE_BADADDR; 6671 *illval = regs[rd]; 6672 break; 6673 } 6674 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6675 break; 6676 6677 case DIF_OP_STH: 6678 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6679 *flags |= CPU_DTRACE_BADADDR; 6680 *illval = regs[rd]; 6681 break; 6682 } 6683 if (regs[rd] & 1) { 6684 *flags |= CPU_DTRACE_BADALIGN; 6685 *illval = regs[rd]; 6686 break; 6687 } 6688 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6689 break; 6690 6691 case DIF_OP_STW: 6692 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6693 *flags |= CPU_DTRACE_BADADDR; 6694 *illval = regs[rd]; 6695 break; 6696 } 6697 if (regs[rd] & 3) { 6698 *flags |= CPU_DTRACE_BADALIGN; 6699 *illval = regs[rd]; 6700 break; 6701 } 6702 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6703 break; 6704 6705 case DIF_OP_STX: 6706 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6707 *flags |= CPU_DTRACE_BADADDR; 6708 *illval = regs[rd]; 6709 break; 6710 } 6711 if (regs[rd] & 7) { 6712 *flags |= CPU_DTRACE_BADALIGN; 6713 *illval = regs[rd]; 6714 break; 6715 } 6716 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6717 break; 6718 } 6719 } 6720 6721 if (!(*flags & CPU_DTRACE_FAULT)) 6722 return (rval); 6723 6724 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6725 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6726 6727 return (0); 6728 } 6729 6730 static void 6731 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6732 { 6733 dtrace_probe_t *probe = ecb->dte_probe; 6734 dtrace_provider_t *prov = probe->dtpr_provider; 6735 char c[DTRACE_FULLNAMELEN + 80], *str; 6736 char *msg = "dtrace: breakpoint action at probe "; 6737 char *ecbmsg = " (ecb "; 6738 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6739 uintptr_t val = (uintptr_t)ecb; 6740 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6741 6742 if (dtrace_destructive_disallow) 6743 return; 6744 6745 /* 6746 * It's impossible to be taking action on the NULL probe. 6747 */ 6748 ASSERT(probe != NULL); 6749 6750 /* 6751 * This is a poor man's (destitute man's?) sprintf(): we want to 6752 * print the provider name, module name, function name and name of 6753 * the probe, along with the hex address of the ECB with the breakpoint 6754 * action -- all of which we must place in the character buffer by 6755 * hand. 
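 * (We do this by hand because this routine may run from probe
 * context with interrupts disabled, where calling into the usual
 * kernel formatting services is not an option. The hex conversion
 * below emits the ECB address one nibble at a time from the most
 * significant end -- a value of 0x1f2e, say, becomes '1', 'f',
 * '2', 'e' -- with leading zeroes suppressed by the
 * val >= ((uintptr_t)1 << shift) test.)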
6756 */ 6757 while (*msg != '\0') 6758 c[i++] = *msg++; 6759 6760 for (str = prov->dtpv_name; *str != '\0'; str++) 6761 c[i++] = *str; 6762 c[i++] = ':'; 6763 6764 for (str = probe->dtpr_mod; *str != '\0'; str++) 6765 c[i++] = *str; 6766 c[i++] = ':'; 6767 6768 for (str = probe->dtpr_func; *str != '\0'; str++) 6769 c[i++] = *str; 6770 c[i++] = ':'; 6771 6772 for (str = probe->dtpr_name; *str != '\0'; str++) 6773 c[i++] = *str; 6774 6775 while (*ecbmsg != '\0') 6776 c[i++] = *ecbmsg++; 6777 6778 while (shift >= 0) { 6779 mask = (uintptr_t)0xf << shift; 6780 6781 if (val >= ((uintptr_t)1 << shift)) 6782 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6783 shift -= 4; 6784 } 6785 6786 c[i++] = ')'; 6787 c[i] = '\0'; 6788 6789 #ifdef illumos 6790 debug_enter(c); 6791 #else 6792 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6793 #endif 6794 } 6795 6796 static void 6797 dtrace_action_panic(dtrace_ecb_t *ecb) 6798 { 6799 dtrace_probe_t *probe = ecb->dte_probe; 6800 6801 /* 6802 * It's impossible to be taking action on the NULL probe. 6803 */ 6804 ASSERT(probe != NULL); 6805 6806 if (dtrace_destructive_disallow) 6807 return; 6808 6809 if (dtrace_panicked != NULL) 6810 return; 6811 6812 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6813 return; 6814 6815 /* 6816 * We won the right to panic. (We want to be sure that only one 6817 * thread calls panic() from dtrace_probe(), and that panic() is 6818 * called exactly once.) 6819 */ 6820 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6821 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6822 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6823 } 6824 6825 static void 6826 dtrace_action_raise(uint64_t sig) 6827 { 6828 if (dtrace_destructive_disallow) 6829 return; 6830 6831 if (sig >= NSIG) { 6832 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6833 return; 6834 } 6835 6836 #ifdef illumos 6837 /* 6838 * raise() has a queue depth of 1 -- we ignore all subsequent 6839 * invocations of the raise() action. 6840 */ 6841 if (curthread->t_dtrace_sig == 0) 6842 curthread->t_dtrace_sig = (uint8_t)sig; 6843 6844 curthread->t_sig_check = 1; 6845 aston(curthread); 6846 #else 6847 struct proc *p = curproc; 6848 PROC_LOCK(p); 6849 kern_psignal(p, sig); 6850 PROC_UNLOCK(p); 6851 #endif 6852 } 6853 6854 static void 6855 dtrace_action_stop(void) 6856 { 6857 if (dtrace_destructive_disallow) 6858 return; 6859 6860 #ifdef illumos 6861 if (!curthread->t_dtrace_stop) { 6862 curthread->t_dtrace_stop = 1; 6863 curthread->t_sig_check = 1; 6864 aston(curthread); 6865 } 6866 #else 6867 struct proc *p = curproc; 6868 PROC_LOCK(p); 6869 kern_psignal(p, SIGSTOP); 6870 PROC_UNLOCK(p); 6871 #endif 6872 } 6873 6874 static void 6875 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 6876 { 6877 hrtime_t now; 6878 volatile uint16_t *flags; 6879 #ifdef illumos 6880 cpu_t *cpu = CPU; 6881 #else 6882 cpu_t *cpu = &solaris_cpu[curcpu]; 6883 #endif 6884 6885 if (dtrace_destructive_disallow) 6886 return; 6887 6888 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 6889 6890 now = dtrace_gethrtime(); 6891 6892 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 6893 /* 6894 * We need to advance the mark to the current time. 6895 */ 6896 cpu->cpu_dtrace_chillmark = now; 6897 cpu->cpu_dtrace_chilled = 0; 6898 } 6899 6900 /* 6901 * Now check to see if the requested chill time would take us over 6902 * the maximum amount of time allowed in the chill interval. (Or 6903 * worse, if the calculation itself induces overflow.) 
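 * Concretely, with the stock tunables this permits at most 500
 * milliseconds of chill() per one-second interval on a given CPU.
 * The second comparison below catches arithmetic wraparound: if
 * cpu_dtrace_chilled + val overflows, the sum compares less than
 * cpu_dtrace_chilled, and we flag an illegal operation rather than
 * spinning more or less indefinitely.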
6904 */ 6905 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 6906 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 6907 *flags |= CPU_DTRACE_ILLOP; 6908 return; 6909 } 6910 6911 while (dtrace_gethrtime() - now < val) 6912 continue; 6913 6914 /* 6915 * Normally, we assure that the value of the variable "timestamp" does 6916 * not change within an ECB. The presence of chill() represents an 6917 * exception to this rule, however. 6918 */ 6919 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 6920 cpu->cpu_dtrace_chilled += val; 6921 } 6922 6923 static void 6924 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 6925 uint64_t *buf, uint64_t arg) 6926 { 6927 int nframes = DTRACE_USTACK_NFRAMES(arg); 6928 int strsize = DTRACE_USTACK_STRSIZE(arg); 6929 uint64_t *pcs = &buf[1], *fps; 6930 char *str = (char *)&pcs[nframes]; 6931 int size, offs = 0, i, j; 6932 uintptr_t old = mstate->dtms_scratch_ptr, saved; 6933 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6934 char *sym; 6935 6936 /* 6937 * Should be taking a faster path if string space has not been 6938 * allocated. 6939 */ 6940 ASSERT(strsize != 0); 6941 6942 /* 6943 * We will first allocate some temporary space for the frame pointers. 6944 */ 6945 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6946 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 6947 (nframes * sizeof (uint64_t)); 6948 6949 if (!DTRACE_INSCRATCH(mstate, size)) { 6950 /* 6951 * Not enough room for our frame pointers -- need to indicate 6952 * that we ran out of scratch space. 6953 */ 6954 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6955 return; 6956 } 6957 6958 mstate->dtms_scratch_ptr += size; 6959 saved = mstate->dtms_scratch_ptr; 6960 6961 /* 6962 * Now get a stack with both program counters and frame pointers. 6963 */ 6964 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6965 dtrace_getufpstack(buf, fps, nframes + 1); 6966 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6967 6968 /* 6969 * If that faulted, we're cooked. 6970 */ 6971 if (*flags & CPU_DTRACE_FAULT) 6972 goto out; 6973 6974 /* 6975 * Now we want to walk up the stack, calling the USTACK helper. For 6976 * each iteration, we restore the scratch pointer. 6977 */ 6978 for (i = 0; i < nframes; i++) { 6979 mstate->dtms_scratch_ptr = saved; 6980 6981 if (offs >= strsize) 6982 break; 6983 6984 sym = (char *)(uintptr_t)dtrace_helper( 6985 DTRACE_HELPER_ACTION_USTACK, 6986 mstate, state, pcs[i], fps[i]); 6987 6988 /* 6989 * If we faulted while running the helper, we're going to 6990 * clear the fault and null out the corresponding string. 6991 */ 6992 if (*flags & CPU_DTRACE_FAULT) { 6993 *flags &= ~CPU_DTRACE_FAULT; 6994 str[offs++] = '\0'; 6995 continue; 6996 } 6997 6998 if (sym == NULL) { 6999 str[offs++] = '\0'; 7000 continue; 7001 } 7002 7003 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7004 7005 /* 7006 * Now copy in the string that the helper returned to us. 7007 */ 7008 for (j = 0; offs + j < strsize; j++) { 7009 if ((str[offs + j] = sym[j]) == '\0') 7010 break; 7011 } 7012 7013 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7014 7015 offs += j + 1; 7016 } 7017 7018 if (offs >= strsize) { 7019 /* 7020 * If we didn't have room for all of the strings, we don't 7021 * abort processing -- this needn't be a fatal error -- but we 7022 * still want to increment a counter (dts_stkstroverflows) to 7023 * allow this condition to be warned about. (If this is from 7024 * a jstack() action, it is easily tuned via jstackstrsize.) 
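 * (As an illustrative remedy -- not anything enforced here -- a
 * consumer seeing these overflows might simply rerun with a larger
 * string space, e.g.:
 *
 *	# dtrace -x jstackstrsize=2048 -n 'profile-97 { jstack(); }'
 *
 * which increases the strsize encoded into each ustack record.)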
7025 */ 7026 dtrace_error(&state->dts_stkstroverflows); 7027 } 7028 7029 while (offs < strsize) 7030 str[offs++] = '\0'; 7031 7032 out: 7033 mstate->dtms_scratch_ptr = old; 7034 } 7035 7036 static void 7037 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 7038 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 7039 { 7040 volatile uint16_t *flags; 7041 uint64_t val = *valp; 7042 size_t valoffs = *valoffsp; 7043 7044 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7045 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 7046 7047 /* 7048 * If this is a string, we're going to only load until we find the zero 7049 * byte -- after which we'll store zero bytes. 7050 */ 7051 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 7052 char c = '\0' + 1; 7053 size_t s; 7054 7055 for (s = 0; s < size; s++) { 7056 if (c != '\0' && dtkind == DIF_TF_BYREF) { 7057 c = dtrace_load8(val++); 7058 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 7059 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7060 c = dtrace_fuword8((void *)(uintptr_t)val++); 7061 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7062 if (*flags & CPU_DTRACE_FAULT) 7063 break; 7064 } 7065 7066 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 7067 7068 if (c == '\0' && intuple) 7069 break; 7070 } 7071 } else { 7072 uint8_t c; 7073 while (valoffs < end) { 7074 if (dtkind == DIF_TF_BYREF) { 7075 c = dtrace_load8(val++); 7076 } else if (dtkind == DIF_TF_BYUREF) { 7077 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7078 c = dtrace_fuword8((void *)(uintptr_t)val++); 7079 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7080 if (*flags & CPU_DTRACE_FAULT) 7081 break; 7082 } 7083 7084 DTRACE_STORE(uint8_t, tomax, 7085 valoffs++, c); 7086 } 7087 } 7088 7089 *valp = val; 7090 *valoffsp = valoffs; 7091 } 7092 7093 /* 7094 * If you're looking for the epicenter of DTrace, you just found it. This 7095 * is the function called by the provider to fire a probe -- from which all 7096 * subsequent probe-context DTrace activity emanates. 7097 */ 7098 void 7099 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7100 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7101 { 7102 processorid_t cpuid; 7103 dtrace_icookie_t cookie; 7104 dtrace_probe_t *probe; 7105 dtrace_mstate_t mstate; 7106 dtrace_ecb_t *ecb; 7107 dtrace_action_t *act; 7108 intptr_t offs; 7109 size_t size; 7110 int vtime, onintr; 7111 volatile uint16_t *flags; 7112 hrtime_t now; 7113 7114 if (panicstr != NULL) 7115 return; 7116 7117 #ifdef illumos 7118 /* 7119 * Kick out immediately if this CPU is still being born (in which case 7120 * curthread will be set to -1) or the current thread can't allow 7121 * probes in its current context. 7122 */ 7123 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7124 return; 7125 #endif 7126 7127 cookie = dtrace_interrupt_disable(); 7128 probe = dtrace_probes[id - 1]; 7129 cpuid = curcpu; 7130 onintr = CPU_ON_INTR(CPU); 7131 7132 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7133 probe->dtpr_predcache == curthread->t_predcache) { 7134 /* 7135 * We have hit in the predicate cache; we know that 7136 * this predicate would evaluate to be false. 7137 */ 7138 dtrace_interrupt_enable(cookie); 7139 return; 7140 } 7141 7142 #ifdef illumos 7143 if (panic_quiesce) { 7144 #else 7145 if (panicstr != NULL) { 7146 #endif 7147 /* 7148 * We don't trace anything if we're panicking. 
7149 */ 7150 dtrace_interrupt_enable(cookie); 7151 return; 7152 } 7153 7154 now = mstate.dtms_timestamp = dtrace_gethrtime(); 7155 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7156 vtime = dtrace_vtime_references != 0; 7157 7158 if (vtime && curthread->t_dtrace_start) 7159 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7160 7161 mstate.dtms_difo = NULL; 7162 mstate.dtms_probe = probe; 7163 mstate.dtms_strtok = 0; 7164 mstate.dtms_arg[0] = arg0; 7165 mstate.dtms_arg[1] = arg1; 7166 mstate.dtms_arg[2] = arg2; 7167 mstate.dtms_arg[3] = arg3; 7168 mstate.dtms_arg[4] = arg4; 7169 7170 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7171 7172 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7173 dtrace_predicate_t *pred = ecb->dte_predicate; 7174 dtrace_state_t *state = ecb->dte_state; 7175 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7176 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7177 dtrace_vstate_t *vstate = &state->dts_vstate; 7178 dtrace_provider_t *prov = probe->dtpr_provider; 7179 uint64_t tracememsize = 0; 7180 int committed = 0; 7181 caddr_t tomax; 7182 7183 /* 7184 * A little subtlety with the following (seemingly innocuous) 7185 * declaration of the automatic 'val': by looking at the 7186 * code, you might think that it could be declared in the 7187 * action processing loop, below. (That is, it's only used in 7188 * the action processing loop.) However, it must be declared 7189 * out of that scope because in the case of DIF expression 7190 * arguments to aggregating actions, one iteration of the 7191 * action loop will use the last iteration's value. 7192 */ 7193 uint64_t val = 0; 7194 7195 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7196 mstate.dtms_getf = NULL; 7197 7198 *flags &= ~CPU_DTRACE_ERROR; 7199 7200 if (prov == dtrace_provider) { 7201 /* 7202 * If dtrace itself is the provider of this probe, 7203 * we're only going to continue processing the ECB if 7204 * arg0 (the dtrace_state_t) is equal to the ECB's 7205 * creating state. (This prevents disjoint consumers 7206 * from seeing one another's metaprobes.) 7207 */ 7208 if (arg0 != (uint64_t)(uintptr_t)state) 7209 continue; 7210 } 7211 7212 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7213 /* 7214 * We're not currently active. If our provider isn't 7215 * the dtrace pseudo provider, we're not interested. 7216 */ 7217 if (prov != dtrace_provider) 7218 continue; 7219 7220 /* 7221 * Now we must further check if we are in the BEGIN 7222 * probe. If we are, we will only continue processing 7223 * if we're still in WARMUP -- if one BEGIN enabling 7224 * has invoked the exit() action, we don't want to 7225 * evaluate subsequent BEGIN enablings. 7226 */ 7227 if (probe->dtpr_id == dtrace_probeid_begin && 7228 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7229 ASSERT(state->dts_activity == 7230 DTRACE_ACTIVITY_DRAINING); 7231 continue; 7232 } 7233 } 7234 7235 if (ecb->dte_cond) { 7236 /* 7237 * If the dte_cond bits indicate that this 7238 * consumer is only allowed to see user-mode firings 7239 * of this probe, call the provider's dtps_usermode() 7240 * entry point to check that the probe was fired 7241 * while in a user context. Skip this ECB if that's 7242 * not the case. 7243 */ 7244 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7245 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7246 probe->dtpr_id, probe->dtpr_arg) == 0) 7247 continue; 7248 7249 #ifdef illumos 7250 /* 7251 * This is more subtle than it looks. 
We have to be 7252 * absolutely certain that CRED() isn't going to 7253 * change out from under us so it's only legit to 7254 * examine that structure if we're in constrained 7255 * situations. Currently, the only time we'll perform this 7256 * check is if a non-super-user has enabled the 7257 * profile or syscall providers -- providers that 7258 * allow visibility of all processes. For the 7259 * profile case, the check above will ensure that 7260 * we're examining a user context. 7261 */ 7262 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7263 cred_t *cr; 7264 cred_t *s_cr = 7265 ecb->dte_state->dts_cred.dcr_cred; 7266 proc_t *proc; 7267 7268 ASSERT(s_cr != NULL); 7269 7270 if ((cr = CRED()) == NULL || 7271 s_cr->cr_uid != cr->cr_uid || 7272 s_cr->cr_uid != cr->cr_ruid || 7273 s_cr->cr_uid != cr->cr_suid || 7274 s_cr->cr_gid != cr->cr_gid || 7275 s_cr->cr_gid != cr->cr_rgid || 7276 s_cr->cr_gid != cr->cr_sgid || 7277 (proc = ttoproc(curthread)) == NULL || 7278 (proc->p_flag & SNOCD)) 7279 continue; 7280 } 7281 7282 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7283 cred_t *cr; 7284 cred_t *s_cr = 7285 ecb->dte_state->dts_cred.dcr_cred; 7286 7287 ASSERT(s_cr != NULL); 7288 7289 if ((cr = CRED()) == NULL || 7290 s_cr->cr_zone->zone_id != 7291 cr->cr_zone->zone_id) 7292 continue; 7293 } 7294 #endif 7295 } 7296 7297 if (now - state->dts_alive > dtrace_deadman_timeout) { 7298 /* 7299 * We seem to be dead. Unless we (a) have kernel 7300 * destructive permissions, (b) have explicitly enabled 7301 * destructive actions, and (c) destructive actions have 7302 * not been disabled, we're going to transition into 7303 * the KILLED state, from which no further processing 7304 * on this state will be performed. 7305 */ 7306 if (!dtrace_priv_kernel_destructive(state) || 7307 !state->dts_cred.dcr_destructive || 7308 dtrace_destructive_disallow) { 7309 void *activity = &state->dts_activity; 7310 dtrace_activity_t current; 7311 7312 do { 7313 current = state->dts_activity; 7314 } while (dtrace_cas32(activity, current, 7315 DTRACE_ACTIVITY_KILLED) != current); 7316 7317 continue; 7318 } 7319 } 7320 7321 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7322 ecb->dte_alignment, state, &mstate)) < 0) 7323 continue; 7324 7325 tomax = buf->dtb_tomax; 7326 ASSERT(tomax != NULL); 7327 7328 if (ecb->dte_size != 0) { 7329 dtrace_rechdr_t dtrh; 7330 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7331 mstate.dtms_timestamp = dtrace_gethrtime(); 7332 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7333 } 7334 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7335 dtrh.dtrh_epid = ecb->dte_epid; 7336 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7337 mstate.dtms_timestamp); 7338 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7339 } 7340 7341 mstate.dtms_epid = ecb->dte_epid; 7342 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7343 7344 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7345 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7346 else 7347 mstate.dtms_access = 0; 7348 7349 if (pred != NULL) { 7350 dtrace_difo_t *dp = pred->dtp_difo; 7351 uint64_t rval; 7352 7353 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7354 7355 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7356 dtrace_cacheid_t cid = probe->dtpr_predcache; 7357 7358 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7359 /* 7360 * Update the predicate cache...
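 * (This is the producer half of the predicate-cache fast
 * path: having just evaluated a cacheable predicate to
 * false for this thread, we record its cache ID in
 * t_predcache so that the check near the top of
 * dtrace_probe() can skip this probe entirely the next
 * time the same thread fires it.)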
7361 */ 7362 ASSERT(cid == pred->dtp_cacheid); 7363 curthread->t_predcache = cid; 7364 } 7365 7366 continue; 7367 } 7368 } 7369 7370 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7371 act != NULL; act = act->dta_next) { 7372 size_t valoffs; 7373 dtrace_difo_t *dp; 7374 dtrace_recdesc_t *rec = &act->dta_rec; 7375 7376 size = rec->dtrd_size; 7377 valoffs = offs + rec->dtrd_offset; 7378 7379 if (DTRACEACT_ISAGG(act->dta_kind)) { 7380 uint64_t v = 0xbad; 7381 dtrace_aggregation_t *agg; 7382 7383 agg = (dtrace_aggregation_t *)act; 7384 7385 if ((dp = act->dta_difo) != NULL) 7386 v = dtrace_dif_emulate(dp, 7387 &mstate, vstate, state); 7388 7389 if (*flags & CPU_DTRACE_ERROR) 7390 continue; 7391 7392 /* 7393 * Note that we always pass the expression 7394 * value from the previous iteration of the 7395 * action loop. This value will only be used 7396 * if there is an expression argument to the 7397 * aggregating action, denoted by the 7398 * dtag_hasarg field. 7399 */ 7400 dtrace_aggregate(agg, buf, 7401 offs, aggbuf, v, val); 7402 continue; 7403 } 7404 7405 switch (act->dta_kind) { 7406 case DTRACEACT_STOP: 7407 if (dtrace_priv_proc_destructive(state)) 7408 dtrace_action_stop(); 7409 continue; 7410 7411 case DTRACEACT_BREAKPOINT: 7412 if (dtrace_priv_kernel_destructive(state)) 7413 dtrace_action_breakpoint(ecb); 7414 continue; 7415 7416 case DTRACEACT_PANIC: 7417 if (dtrace_priv_kernel_destructive(state)) 7418 dtrace_action_panic(ecb); 7419 continue; 7420 7421 case DTRACEACT_STACK: 7422 if (!dtrace_priv_kernel(state)) 7423 continue; 7424 7425 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7426 size / sizeof (pc_t), probe->dtpr_aframes, 7427 DTRACE_ANCHORED(probe) ? NULL : 7428 (uint32_t *)arg0); 7429 continue; 7430 7431 case DTRACEACT_JSTACK: 7432 case DTRACEACT_USTACK: 7433 if (!dtrace_priv_proc(state)) 7434 continue; 7435 7436 /* 7437 * See comment in DIF_VAR_PID. 7438 */ 7439 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7440 CPU_ON_INTR(CPU)) { 7441 int depth = DTRACE_USTACK_NFRAMES( 7442 rec->dtrd_arg) + 1; 7443 7444 dtrace_bzero((void *)(tomax + valoffs), 7445 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7446 + depth * sizeof (uint64_t)); 7447 7448 continue; 7449 } 7450 7451 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7452 curproc->p_dtrace_helpers != NULL) { 7453 /* 7454 * This is the slow path -- we have 7455 * allocated string space, and we're 7456 * getting the stack of a process that 7457 * has helpers. Call into a separate 7458 * routine to perform this processing. 
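 * (The record that dtrace_action_ustack() fills in follows
 * the layout implied by its pointer math -- pcs = &buf[1],
 * str = (char *)&pcs[nframes] -- that is, as a sketch:
 *
 *	buf[0]              process designator (hence the
 *	                    nframes + 1 frames gathered)
 *	buf[1 .. nframes]   user program counters
 *	following the pcs   strsize bytes of string space.)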
7459 */ 7460 dtrace_action_ustack(&mstate, state, 7461 (uint64_t *)(tomax + valoffs), 7462 rec->dtrd_arg); 7463 continue; 7464 } 7465 7466 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7467 dtrace_getupcstack((uint64_t *) 7468 (tomax + valoffs), 7469 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7470 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7471 continue; 7472 7473 default: 7474 break; 7475 } 7476 7477 dp = act->dta_difo; 7478 ASSERT(dp != NULL); 7479 7480 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7481 7482 if (*flags & CPU_DTRACE_ERROR) 7483 continue; 7484 7485 switch (act->dta_kind) { 7486 case DTRACEACT_SPECULATE: { 7487 dtrace_rechdr_t *dtrh; 7488 7489 ASSERT(buf == &state->dts_buffer[cpuid]); 7490 buf = dtrace_speculation_buffer(state, 7491 cpuid, val); 7492 7493 if (buf == NULL) { 7494 *flags |= CPU_DTRACE_DROP; 7495 continue; 7496 } 7497 7498 offs = dtrace_buffer_reserve(buf, 7499 ecb->dte_needed, ecb->dte_alignment, 7500 state, NULL); 7501 7502 if (offs < 0) { 7503 *flags |= CPU_DTRACE_DROP; 7504 continue; 7505 } 7506 7507 tomax = buf->dtb_tomax; 7508 ASSERT(tomax != NULL); 7509 7510 if (ecb->dte_size == 0) 7511 continue; 7512 7513 ASSERT3U(ecb->dte_size, >=, 7514 sizeof (dtrace_rechdr_t)); 7515 dtrh = ((void *)(tomax + offs)); 7516 dtrh->dtrh_epid = ecb->dte_epid; 7517 /* 7518 * When the speculation is committed, all of 7519 * the records in the speculative buffer will 7520 * have their timestamps set to the commit 7521 * time. Until then, it is set to a sentinel 7522 * value, for debuggability. 7523 */ 7524 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7525 continue; 7526 } 7527 7528 case DTRACEACT_PRINTM: { 7529 /* The DIF returns a 'memref'. */ 7530 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7531 7532 /* Get the size from the memref. */ 7533 size = memref[1]; 7534 7535 /* 7536 * Check if the size exceeds the allocated 7537 * buffer size. 7538 */ 7539 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7540 /* Flag a drop! */ 7541 *flags |= CPU_DTRACE_DROP; 7542 continue; 7543 } 7544 7545 /* Store the size in the buffer first. */ 7546 DTRACE_STORE(uintptr_t, tomax, 7547 valoffs, size); 7548 7549 /* 7550 * Offset the buffer address to the start 7551 * of the data. 7552 */ 7553 valoffs += sizeof(uintptr_t); 7554 7555 /* 7556 * Reset to the memory address rather than 7557 * the memref array, then let the BYREF 7558 * code below do the work to store the 7559 * memory data in the buffer. 7560 */ 7561 val = memref[0]; 7562 break; 7563 } 7564 7565 case DTRACEACT_PRINTT: { 7566 /* The DIF returns a 'typeref'. */ 7567 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 7568 char c = '\0' + 1; 7569 size_t s; 7570 7571 /* 7572 * Get the type string length and round it 7573 * up so that the data that follows is 7574 * aligned for easy access. 7575 */ 7576 size_t typs = strlen((char *) typeref[2]) + 1; 7577 typs = roundup(typs, sizeof(uintptr_t)); 7578 7579 /* 7580 * Get the size from the typeref using the 7581 * number of elements and the type size. 7582 */ 7583 size = typeref[1] * typeref[3]; 7584 7585 /* 7586 * Check if the size exceeds the allocated 7587 * buffer size. 7588 */ 7589 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7590 /* Flag a drop! */ 7591 *flags |= CPU_DTRACE_DROP; 7592 continue; 7593 } 7594 7595 /* Store the size in the buffer first. */ 7596 DTRACE_STORE(uintptr_t, tomax, 7597 valoffs, size); 7598 valoffs += sizeof(uintptr_t); 7599 7600 /* Store the type size in the buffer.
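 * (Putting the pieces together, the printt() record being
 * assembled here is, as a sketch: [total data size]
 * [element type size][type string, NUL-padded to pointer
 * alignment][the data itself, copied by the by-ref code
 * below once val is reset to typeref[0]].)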
*/ 7601 DTRACE_STORE(uintptr_t, tomax, 7602 valoffs, typeref[3]); 7603 valoffs += sizeof(uintptr_t); 7604 7605 val = typeref[2]; 7606 7607 for (s = 0; s < typs; s++) { 7608 if (c != '\0') 7609 c = dtrace_load8(val++); 7610 7611 DTRACE_STORE(uint8_t, tomax, 7612 valoffs++, c); 7613 } 7614 7615 /* 7616 * Reset to the memory address rather than 7617 * the typeref array, then let the BYREF 7618 * code below do the work to store the 7619 * memory data in the buffer. 7620 */ 7621 val = typeref[0]; 7622 break; 7623 } 7624 7625 case DTRACEACT_CHILL: 7626 if (dtrace_priv_kernel_destructive(state)) 7627 dtrace_action_chill(&mstate, val); 7628 continue; 7629 7630 case DTRACEACT_RAISE: 7631 if (dtrace_priv_proc_destructive(state)) 7632 dtrace_action_raise(val); 7633 continue; 7634 7635 case DTRACEACT_COMMIT: 7636 ASSERT(!committed); 7637 7638 /* 7639 * We need to commit our buffer state. 7640 */ 7641 if (ecb->dte_size) 7642 buf->dtb_offset = offs + ecb->dte_size; 7643 buf = &state->dts_buffer[cpuid]; 7644 dtrace_speculation_commit(state, cpuid, val); 7645 committed = 1; 7646 continue; 7647 7648 case DTRACEACT_DISCARD: 7649 dtrace_speculation_discard(state, cpuid, val); 7650 continue; 7651 7652 case DTRACEACT_DIFEXPR: 7653 case DTRACEACT_LIBACT: 7654 case DTRACEACT_PRINTF: 7655 case DTRACEACT_PRINTA: 7656 case DTRACEACT_SYSTEM: 7657 case DTRACEACT_FREOPEN: 7658 case DTRACEACT_TRACEMEM: 7659 break; 7660 7661 case DTRACEACT_TRACEMEM_DYNSIZE: 7662 tracememsize = val; 7663 break; 7664 7665 case DTRACEACT_SYM: 7666 case DTRACEACT_MOD: 7667 if (!dtrace_priv_kernel(state)) 7668 continue; 7669 break; 7670 7671 case DTRACEACT_USYM: 7672 case DTRACEACT_UMOD: 7673 case DTRACEACT_UADDR: { 7674 #ifdef illumos 7675 struct pid *pid = curthread->t_procp->p_pidp; 7676 #endif 7677 7678 if (!dtrace_priv_proc(state)) 7679 continue; 7680 7681 DTRACE_STORE(uint64_t, tomax, 7682 #ifdef illumos 7683 valoffs, (uint64_t)pid->pid_id); 7684 #else 7685 valoffs, (uint64_t) curproc->p_pid); 7686 #endif 7687 DTRACE_STORE(uint64_t, tomax, 7688 valoffs + sizeof (uint64_t), val); 7689 7690 continue; 7691 } 7692 7693 case DTRACEACT_EXIT: { 7694 /* 7695 * For the exit action, we are going to attempt 7696 * to atomically set our activity to be 7697 * draining. If this fails (either because 7698 * another CPU has beat us to the exit action, 7699 * or because our current activity is something 7700 * other than ACTIVE or WARMUP), we will 7701 * continue. This assures that the exit action 7702 * can be successfully recorded at most once 7703 * when we're in the ACTIVE state. If we're 7704 * encountering the exit() action while in 7705 * COOLDOWN, however, we want to honor the new 7706 * status code. (We know that we're the only 7707 * thread in COOLDOWN, so there is no race.) 
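 * The transition being attempted is, as a sketch:
 *
 *	ACTIVE or WARMUP  --dtrace_cas32-->  DRAINING
 *
 * If the compare-and-swap loses -- either because another
 * CPU has already moved the state or because we were in
 * neither ACTIVE nor WARMUP -- the exit() is dropped
 * rather than recorded a second time.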
7708 */ 7709 void *activity = &state->dts_activity; 7710 dtrace_activity_t current = state->dts_activity; 7711 7712 if (current == DTRACE_ACTIVITY_COOLDOWN) 7713 break; 7714 7715 if (current != DTRACE_ACTIVITY_WARMUP) 7716 current = DTRACE_ACTIVITY_ACTIVE; 7717 7718 if (dtrace_cas32(activity, current, 7719 DTRACE_ACTIVITY_DRAINING) != current) { 7720 *flags |= CPU_DTRACE_DROP; 7721 continue; 7722 } 7723 7724 break; 7725 } 7726 7727 default: 7728 ASSERT(0); 7729 } 7730 7731 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7732 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7733 uintptr_t end = valoffs + size; 7734 7735 if (tracememsize != 0 && 7736 valoffs + tracememsize < end) { 7737 end = valoffs + tracememsize; 7738 tracememsize = 0; 7739 } 7740 7741 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7742 !dtrace_vcanload((void *)(uintptr_t)val, 7743 &dp->dtdo_rtype, &mstate, vstate)) 7744 continue; 7745 7746 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7747 &val, end, act->dta_intuple, 7748 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7749 DIF_TF_BYREF: DIF_TF_BYUREF); 7750 continue; 7751 } 7752 7753 switch (size) { 7754 case 0: 7755 break; 7756 7757 case sizeof (uint8_t): 7758 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7759 break; 7760 case sizeof (uint16_t): 7761 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7762 break; 7763 case sizeof (uint32_t): 7764 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7765 break; 7766 case sizeof (uint64_t): 7767 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7768 break; 7769 default: 7770 /* 7771 * Any other size should have been returned by 7772 * reference, not by value. 7773 */ 7774 ASSERT(0); 7775 break; 7776 } 7777 } 7778 7779 if (*flags & CPU_DTRACE_DROP) 7780 continue; 7781 7782 if (*flags & CPU_DTRACE_FAULT) { 7783 int ndx; 7784 dtrace_action_t *err; 7785 7786 buf->dtb_errors++; 7787 7788 if (probe->dtpr_id == dtrace_probeid_error) { 7789 /* 7790 * There's nothing we can do -- we had an 7791 * error on the error probe. We bump an 7792 * error counter to at least indicate that 7793 * this condition happened. 7794 */ 7795 dtrace_error(&state->dts_dblerrors); 7796 continue; 7797 } 7798 7799 if (vtime) { 7800 /* 7801 * Before recursing on dtrace_probe(), we 7802 * need to explicitly clear out our start 7803 * time to prevent it from being accumulated 7804 * into t_dtrace_vtime. 7805 */ 7806 curthread->t_dtrace_start = 0; 7807 } 7808 7809 /* 7810 * Iterate over the actions to figure out which action 7811 * we were processing when we experienced the error. 7812 * Note that act points _past_ the faulting action; if 7813 * act is ecb->dte_action, the fault was in the 7814 * predicate, if it's ecb->dte_action->dta_next it's 7815 * in action #1, and so on. 7816 */ 7817 for (err = ecb->dte_action, ndx = 0; 7818 err != act; err = err->dta_next, ndx++) 7819 continue; 7820 7821 dtrace_probe_error(state, ecb->dte_epid, ndx, 7822 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7823 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7824 cpu_core[cpuid].cpuc_dtrace_illval); 7825 7826 continue; 7827 } 7828 7829 if (!committed) 7830 buf->dtb_offset = offs + ecb->dte_size; 7831 } 7832 7833 if (vtime) 7834 curthread->t_dtrace_start = dtrace_gethrtime(); 7835 7836 dtrace_interrupt_enable(cookie); 7837 } 7838 7839 /* 7840 * DTrace Probe Hashing Functions 7841 * 7842 * The functions in this section (and indeed, the functions in remaining 7843 * sections) are not _called_ from probe context. (Any exceptions to this are 7844 * marked with a "Note:".) 
Rather, they are called from elsewhere in the 7845 * DTrace framework to look up probes in, add probes to, and remove probes from 7846 * the DTrace probe hashes. (Each probe is hashed by each element of the 7847 * probe tuple -- allowing for fast lookups, regardless of what was 7848 * specified.) 7849 */ 7850 static uint_t 7851 dtrace_hash_str(const char *p) 7852 { 7853 unsigned int g; 7854 uint_t hval = 0; 7855 7856 while (*p) { 7857 hval = (hval << 4) + *p++; 7858 if ((g = (hval & 0xf0000000)) != 0) 7859 hval ^= g >> 24; 7860 hval &= ~g; 7861 } 7862 return (hval); 7863 } 7864 7865 static dtrace_hash_t * 7866 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 7867 { 7868 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 7869 7870 hash->dth_stroffs = stroffs; 7871 hash->dth_nextoffs = nextoffs; 7872 hash->dth_prevoffs = prevoffs; 7873 7874 hash->dth_size = 1; 7875 hash->dth_mask = hash->dth_size - 1; 7876 7877 hash->dth_tab = kmem_zalloc(hash->dth_size * 7878 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 7879 7880 return (hash); 7881 } 7882 7883 static void 7884 dtrace_hash_destroy(dtrace_hash_t *hash) 7885 { 7886 #ifdef DEBUG 7887 int i; 7888 7889 for (i = 0; i < hash->dth_size; i++) 7890 ASSERT(hash->dth_tab[i] == NULL); 7891 #endif 7892 7893 kmem_free(hash->dth_tab, 7894 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 7895 kmem_free(hash, sizeof (dtrace_hash_t)); 7896 } 7897 7898 static void 7899 dtrace_hash_resize(dtrace_hash_t *hash) 7900 { 7901 int size = hash->dth_size, i, ndx; 7902 int new_size = hash->dth_size << 1; 7903 int new_mask = new_size - 1; 7904 dtrace_hashbucket_t **new_tab, *bucket, *next; 7905 7906 ASSERT((new_size & new_mask) == 0); 7907 7908 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 7909 7910 for (i = 0; i < size; i++) { 7911 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 7912 dtrace_probe_t *probe = bucket->dthb_chain; 7913 7914 ASSERT(probe != NULL); 7915 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 7916 7917 next = bucket->dthb_next; 7918 bucket->dthb_next = new_tab[ndx]; 7919 new_tab[ndx] = bucket; 7920 } 7921 } 7922 7923 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 7924 hash->dth_tab = new_tab; 7925 hash->dth_size = new_size; 7926 hash->dth_mask = new_mask; 7927 } 7928 7929 static void 7930 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 7931 { 7932 int hashval = DTRACE_HASHSTR(hash, new); 7933 int ndx = hashval & hash->dth_mask; 7934 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7935 dtrace_probe_t **nextp, **prevp; 7936 7937 for (; bucket != NULL; bucket = bucket->dthb_next) { 7938 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 7939 goto add; 7940 } 7941 7942 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 7943 dtrace_hash_resize(hash); 7944 dtrace_hash_add(hash, new); 7945 return; 7946 } 7947 7948 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 7949 bucket->dthb_next = hash->dth_tab[ndx]; 7950 hash->dth_tab[ndx] = bucket; 7951 hash->dth_nbuckets++; 7952 7953 add: 7954 nextp = DTRACE_HASHNEXT(hash, new); 7955 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 7956 *nextp = bucket->dthb_chain; 7957 7958 if (bucket->dthb_chain != NULL) { 7959 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 7960 ASSERT(*prevp == NULL); 7961 *prevp = new; 7962 } 7963 7964 bucket->dthb_chain = new; 7965 bucket->dthb_len++; 7966 } 7967 7968 static dtrace_probe_t * 7969 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 7970
{ 7971 int hashval = DTRACE_HASHSTR(hash, template); 7972 int ndx = hashval & hash->dth_mask; 7973 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7974 7975 for (; bucket != NULL; bucket = bucket->dthb_next) { 7976 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7977 return (bucket->dthb_chain); 7978 } 7979 7980 return (NULL); 7981 } 7982 7983 static int 7984 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 7985 { 7986 int hashval = DTRACE_HASHSTR(hash, template); 7987 int ndx = hashval & hash->dth_mask; 7988 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7989 7990 for (; bucket != NULL; bucket = bucket->dthb_next) { 7991 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7992 return (bucket->dthb_len); 7993 } 7994 7995 return (0); 7996 } 7997 7998 static void 7999 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 8000 { 8001 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 8002 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 8003 8004 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 8005 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 8006 8007 /* 8008 * Find the bucket that we're removing this probe from. 8009 */ 8010 for (; bucket != NULL; bucket = bucket->dthb_next) { 8011 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 8012 break; 8013 } 8014 8015 ASSERT(bucket != NULL); 8016 8017 if (*prevp == NULL) { 8018 if (*nextp == NULL) { 8019 /* 8020 * The removed probe was the only probe on this 8021 * bucket; we need to remove the bucket. 8022 */ 8023 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 8024 8025 ASSERT(bucket->dthb_chain == probe); 8026 ASSERT(b != NULL); 8027 8028 if (b == bucket) { 8029 hash->dth_tab[ndx] = bucket->dthb_next; 8030 } else { 8031 while (b->dthb_next != bucket) 8032 b = b->dthb_next; 8033 b->dthb_next = bucket->dthb_next; 8034 } 8035 8036 ASSERT(hash->dth_nbuckets > 0); 8037 hash->dth_nbuckets--; 8038 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 8039 return; 8040 } 8041 8042 bucket->dthb_chain = *nextp; 8043 } else { 8044 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 8045 } 8046 8047 if (*nextp != NULL) 8048 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 8049 } 8050 8051 /* 8052 * DTrace Utility Functions 8053 * 8054 * These are random utility functions that are _not_ called from probe context. 8055 */ 8056 static int 8057 dtrace_badattr(const dtrace_attribute_t *a) 8058 { 8059 return (a->dtat_name > DTRACE_STABILITY_MAX || 8060 a->dtat_data > DTRACE_STABILITY_MAX || 8061 a->dtat_class > DTRACE_CLASS_MAX); 8062 } 8063 8064 /* 8065 * Return a duplicate copy of a string. If the specified string is NULL, 8066 * this function returns a zero-length string. 8067 */ 8068 static char * 8069 dtrace_strdup(const char *str) 8070 { 8071 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 8072 8073 if (str != NULL) 8074 (void) strcpy(new, str); 8075 8076 return (new); 8077 } 8078 8079 #define DTRACE_ISALPHA(c) \ 8080 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8081 8082 static int 8083 dtrace_badname(const char *s) 8084 { 8085 char c; 8086 8087 if (s == NULL || (c = *s++) == '\0') 8088 return (0); 8089 8090 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8091 return (1); 8092 8093 while ((c = *s++) != '\0') { 8094 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8095 c != '-' && c != '_' && c != '.' 
&& c != '`') 8096 return (1); 8097 } 8098 8099 return (0); 8100 } 8101 8102 static void 8103 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8104 { 8105 uint32_t priv; 8106 8107 #ifdef illumos 8108 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8109 /* 8110 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 8111 */ 8112 priv = DTRACE_PRIV_ALL; 8113 } else { 8114 *uidp = crgetuid(cr); 8115 *zoneidp = crgetzoneid(cr); 8116 8117 priv = 0; 8118 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8119 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8120 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8121 priv |= DTRACE_PRIV_USER; 8122 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8123 priv |= DTRACE_PRIV_PROC; 8124 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8125 priv |= DTRACE_PRIV_OWNER; 8126 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8127 priv |= DTRACE_PRIV_ZONEOWNER; 8128 } 8129 #else 8130 priv = DTRACE_PRIV_ALL; 8131 #endif 8132 8133 *privp = priv; 8134 } 8135 8136 #ifdef DTRACE_ERRDEBUG 8137 static void 8138 dtrace_errdebug(const char *str) 8139 { 8140 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8141 int occupied = 0; 8142 8143 mutex_enter(&dtrace_errlock); 8144 dtrace_errlast = str; 8145 dtrace_errthread = curthread; 8146 8147 while (occupied++ < DTRACE_ERRHASHSZ) { 8148 if (dtrace_errhash[hval].dter_msg == str) { 8149 dtrace_errhash[hval].dter_count++; 8150 goto out; 8151 } 8152 8153 if (dtrace_errhash[hval].dter_msg != NULL) { 8154 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8155 continue; 8156 } 8157 8158 dtrace_errhash[hval].dter_msg = str; 8159 dtrace_errhash[hval].dter_count = 1; 8160 goto out; 8161 } 8162 8163 panic("dtrace: undersized error hash"); 8164 out: 8165 mutex_exit(&dtrace_errlock); 8166 } 8167 #endif 8168 8169 /* 8170 * DTrace Matching Functions 8171 * 8172 * These functions are used to match groups of probes, given some elements of 8173 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8174 */ 8175 static int 8176 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8177 zoneid_t zoneid) 8178 { 8179 if (priv != DTRACE_PRIV_ALL) { 8180 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8181 uint32_t match = priv & ppriv; 8182 8183 /* 8184 * No PRIV_DTRACE_* privileges... 8185 */ 8186 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8187 DTRACE_PRIV_KERNEL)) == 0) 8188 return (0); 8189 8190 /* 8191 * No matching bits, but there were bits to match... 8192 */ 8193 if (match == 0 && ppriv != 0) 8194 return (0); 8195 8196 /* 8197 * Need to have permissions to the process, but don't... 8198 */ 8199 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8200 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8201 return (0); 8202 } 8203 8204 /* 8205 * Need to be in the same zone unless we possess the 8206 * privilege to examine all zones. 8207 */ 8208 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8209 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8210 return (0); 8211 } 8212 } 8213 8214 return (1); 8215 } 8216 8217 /* 8218 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8219 * consists of input pattern strings and an ops-vector to evaluate them. 8220 * This function returns >0 for match, 0 for no match, and <0 for error. 
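 * (For example, the description syscall::read:entry compiles to a key
 * whose dtpk_pmatch, dtpk_mmatch, dtpk_fmatch and dtpk_nmatch are, per
 * the selection rules described below, dtrace_match_string() for
 * "syscall", dtrace_match_nul() for the empty module element, and
 * dtrace_match_string() for "read" and "entry" respectively; a
 * description like syscall::re*d:entry would instead use
 * dtrace_match_glob() for the function element.)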
8221 */ 8222 static int 8223 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8224 uint32_t priv, uid_t uid, zoneid_t zoneid) 8225 { 8226 dtrace_provider_t *pvp = prp->dtpr_provider; 8227 int rv; 8228 8229 if (pvp->dtpv_defunct) 8230 return (0); 8231 8232 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8233 return (rv); 8234 8235 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8236 return (rv); 8237 8238 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8239 return (rv); 8240 8241 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8242 return (rv); 8243 8244 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8245 return (0); 8246 8247 return (rv); 8248 } 8249 8250 /* 8251 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8252 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8253 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8254 * In addition, all of the recursion cases except for '*' matching have been 8255 * unwound. For '*', we still implement recursive evaluation, but a depth 8256 * counter is maintained and matching is aborted if we recurse too deep. 8257 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8258 */ 8259 static int 8260 dtrace_match_glob(const char *s, const char *p, int depth) 8261 { 8262 const char *olds; 8263 char s1, c; 8264 int gs; 8265 8266 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8267 return (-1); 8268 8269 if (s == NULL) 8270 s = ""; /* treat NULL as empty string */ 8271 8272 top: 8273 olds = s; 8274 s1 = *s++; 8275 8276 if (p == NULL) 8277 return (0); 8278 8279 if ((c = *p++) == '\0') 8280 return (s1 == '\0'); 8281 8282 switch (c) { 8283 case '[': { 8284 int ok = 0, notflag = 0; 8285 char lc = '\0'; 8286 8287 if (s1 == '\0') 8288 return (0); 8289 8290 if (*p == '!') { 8291 notflag = 1; 8292 p++; 8293 } 8294 8295 if ((c = *p++) == '\0') 8296 return (0); 8297 8298 do { 8299 if (c == '-' && lc != '\0' && *p != ']') { 8300 if ((c = *p++) == '\0') 8301 return (0); 8302 if (c == '\\' && (c = *p++) == '\0') 8303 return (0); 8304 8305 if (notflag) { 8306 if (s1 < lc || s1 > c) 8307 ok++; 8308 else 8309 return (0); 8310 } else if (lc <= s1 && s1 <= c) 8311 ok++; 8312 8313 } else if (c == '\\' && (c = *p++) == '\0') 8314 return (0); 8315 8316 lc = c; /* save left-hand 'c' for next iteration */ 8317 8318 if (notflag) { 8319 if (s1 != c) 8320 ok++; 8321 else 8322 return (0); 8323 } else if (s1 == c) 8324 ok++; 8325 8326 if ((c = *p++) == '\0') 8327 return (0); 8328 8329 } while (c != ']'); 8330 8331 if (ok) 8332 goto top; 8333 8334 return (0); 8335 } 8336 8337 case '\\': 8338 if ((c = *p++) == '\0') 8339 return (0); 8340 /*FALLTHRU*/ 8341 8342 default: 8343 if (c != s1) 8344 return (0); 8345 /*FALLTHRU*/ 8346 8347 case '?': 8348 if (s1 != '\0') 8349 goto top; 8350 return (0); 8351 8352 case '*': 8353 while (*p == '*') 8354 p++; /* consecutive *'s are identical to a single one */ 8355 8356 if (*p == '\0') 8357 return (1); 8358 8359 for (s = olds; *s != '\0'; s++) { 8360 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8361 return (gs); 8362 } 8363 8364 return (0); 8365 } 8366 } 8367 8368 /*ARGSUSED*/ 8369 static int 8370 dtrace_match_string(const char *s, const char *p, int depth) 8371 { 8372 return (s != NULL && strcmp(s, p) == 0); 8373 } 8374 8375 /*ARGSUSED*/ 8376 static int 8377 dtrace_match_nul(const char *s, const char *p, int depth) 8378 { 8379 return (1); /* 
always match the empty pattern */ 8380 } 8381 8382 /*ARGSUSED*/ 8383 static int 8384 dtrace_match_nonzero(const char *s, const char *p, int depth) 8385 { 8386 return (s != NULL && s[0] != '\0'); 8387 } 8388 8389 static int 8390 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8391 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8392 { 8393 dtrace_probe_t template, *probe; 8394 dtrace_hash_t *hash = NULL; 8395 int len, best = INT_MAX, nmatched = 0; 8396 dtrace_id_t i; 8397 8398 ASSERT(MUTEX_HELD(&dtrace_lock)); 8399 8400 /* 8401 * If the probe ID is specified in the key, just lookup by ID and 8402 * invoke the match callback once if a matching probe is found. 8403 */ 8404 if (pkp->dtpk_id != DTRACE_IDNONE) { 8405 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8406 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8407 (void) (*matched)(probe, arg); 8408 nmatched++; 8409 } 8410 return (nmatched); 8411 } 8412 8413 template.dtpr_mod = (char *)pkp->dtpk_mod; 8414 template.dtpr_func = (char *)pkp->dtpk_func; 8415 template.dtpr_name = (char *)pkp->dtpk_name; 8416 8417 /* 8418 * We want to find the most distinct of the module name, function 8419 * name, and name. So for each one that is not a glob pattern or 8420 * empty string, we perform a lookup in the corresponding hash and 8421 * use the hash table with the fewest collisions to do our search. 8422 */ 8423 if (pkp->dtpk_mmatch == &dtrace_match_string && 8424 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8425 best = len; 8426 hash = dtrace_bymod; 8427 } 8428 8429 if (pkp->dtpk_fmatch == &dtrace_match_string && 8430 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8431 best = len; 8432 hash = dtrace_byfunc; 8433 } 8434 8435 if (pkp->dtpk_nmatch == &dtrace_match_string && 8436 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8437 best = len; 8438 hash = dtrace_byname; 8439 } 8440 8441 /* 8442 * If we did not select a hash table, iterate over every probe and 8443 * invoke our callback for each one that matches our input probe key. 8444 */ 8445 if (hash == NULL) { 8446 for (i = 0; i < dtrace_nprobes; i++) { 8447 if ((probe = dtrace_probes[i]) == NULL || 8448 dtrace_match_probe(probe, pkp, priv, uid, 8449 zoneid) <= 0) 8450 continue; 8451 8452 nmatched++; 8453 8454 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8455 break; 8456 } 8457 8458 return (nmatched); 8459 } 8460 8461 /* 8462 * If we selected a hash table, iterate over each probe of the same key 8463 * name and invoke the callback for every probe that matches the other 8464 * attributes of our input probe key. 8465 */ 8466 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8467 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8468 8469 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8470 continue; 8471 8472 nmatched++; 8473 8474 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8475 break; 8476 } 8477 8478 return (nmatched); 8479 } 8480 8481 /* 8482 * Return the function pointer dtrace_match_probe() should use to compare the 8483 * specified pattern with a string. For NULL or empty patterns, we select 8484 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8485 * For non-empty non-glob strings, we use dtrace_match_string(). 
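 *
 * For instance (the patterns here are illustrative): "fbt" selects
 * dtrace_match_string(), "read*" and "tcp_[rs]*" select dtrace_match_glob()
 * because they contain metacharacters, and NULL or "" selects
 * dtrace_match_nul().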
8486 */ 8487 static dtrace_probekey_f * 8488 dtrace_probekey_func(const char *p) 8489 { 8490 char c; 8491 8492 if (p == NULL || *p == '\0') 8493 return (&dtrace_match_nul); 8494 8495 while ((c = *p++) != '\0') { 8496 if (c == '[' || c == '?' || c == '*' || c == '\\') 8497 return (&dtrace_match_glob); 8498 } 8499 8500 return (&dtrace_match_string); 8501 } 8502 8503 /* 8504 * Build a probe comparison key for use with dtrace_match_probe() from the 8505 * given probe description. By convention, a null key only matches anchored 8506 * probes: if each field is the empty string, reset dtpk_fmatch to 8507 * dtrace_match_nonzero(). 8508 */ 8509 static void 8510 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 8511 { 8512 pkp->dtpk_prov = pdp->dtpd_provider; 8513 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 8514 8515 pkp->dtpk_mod = pdp->dtpd_mod; 8516 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 8517 8518 pkp->dtpk_func = pdp->dtpd_func; 8519 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 8520 8521 pkp->dtpk_name = pdp->dtpd_name; 8522 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 8523 8524 pkp->dtpk_id = pdp->dtpd_id; 8525 8526 if (pkp->dtpk_id == DTRACE_IDNONE && 8527 pkp->dtpk_pmatch == &dtrace_match_nul && 8528 pkp->dtpk_mmatch == &dtrace_match_nul && 8529 pkp->dtpk_fmatch == &dtrace_match_nul && 8530 pkp->dtpk_nmatch == &dtrace_match_nul) 8531 pkp->dtpk_fmatch = &dtrace_match_nonzero; 8532 } 8533 8534 /* 8535 * DTrace Provider-to-Framework API Functions 8536 * 8537 * These functions implement much of the Provider-to-Framework API, as 8538 * described in <sys/dtrace.h>. The parts of the API not in this section are 8539 * the functions in the API for probe management (found below), and 8540 * dtrace_probe() itself (found above). 8541 */ 8542 8543 /* 8544 * Register the calling provider with the DTrace framework. This should 8545 * generally be called by DTrace providers in their attach(9E) entry point. 8546 */ 8547 int 8548 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 8549 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 8550 { 8551 dtrace_provider_t *provider; 8552 8553 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 8554 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8555 "arguments", name ? 
name : "<NULL>"); 8556 return (EINVAL); 8557 } 8558 8559 if (name[0] == '\0' || dtrace_badname(name)) { 8560 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8561 "provider name", name); 8562 return (EINVAL); 8563 } 8564 8565 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8566 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8567 pops->dtps_destroy == NULL || 8568 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8569 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8570 "provider ops", name); 8571 return (EINVAL); 8572 } 8573 8574 if (dtrace_badattr(&pap->dtpa_provider) || 8575 dtrace_badattr(&pap->dtpa_mod) || 8576 dtrace_badattr(&pap->dtpa_func) || 8577 dtrace_badattr(&pap->dtpa_name) || 8578 dtrace_badattr(&pap->dtpa_args)) { 8579 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8580 "provider attributes", name); 8581 return (EINVAL); 8582 } 8583 8584 if (priv & ~DTRACE_PRIV_ALL) { 8585 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8586 "privilege attributes", name); 8587 return (EINVAL); 8588 } 8589 8590 if ((priv & DTRACE_PRIV_KERNEL) && 8591 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8592 pops->dtps_usermode == NULL) { 8593 cmn_err(CE_WARN, "failed to register provider '%s': need " 8594 "dtps_usermode() op for given privilege attributes", name); 8595 return (EINVAL); 8596 } 8597 8598 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8599 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8600 (void) strcpy(provider->dtpv_name, name); 8601 8602 provider->dtpv_attr = *pap; 8603 provider->dtpv_priv.dtpp_flags = priv; 8604 if (cr != NULL) { 8605 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8606 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8607 } 8608 provider->dtpv_pops = *pops; 8609 8610 if (pops->dtps_provide == NULL) { 8611 ASSERT(pops->dtps_provide_module != NULL); 8612 provider->dtpv_pops.dtps_provide = 8613 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8614 } 8615 8616 if (pops->dtps_provide_module == NULL) { 8617 ASSERT(pops->dtps_provide != NULL); 8618 provider->dtpv_pops.dtps_provide_module = 8619 (void (*)(void *, modctl_t *))dtrace_nullop; 8620 } 8621 8622 if (pops->dtps_suspend == NULL) { 8623 ASSERT(pops->dtps_resume == NULL); 8624 provider->dtpv_pops.dtps_suspend = 8625 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8626 provider->dtpv_pops.dtps_resume = 8627 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8628 } 8629 8630 provider->dtpv_arg = arg; 8631 *idp = (dtrace_provider_id_t)provider; 8632 8633 if (pops == &dtrace_provider_ops) { 8634 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8635 ASSERT(MUTEX_HELD(&dtrace_lock)); 8636 ASSERT(dtrace_anon.dta_enabling == NULL); 8637 8638 /* 8639 * We make sure that the DTrace provider is at the head of 8640 * the provider chain. 8641 */ 8642 provider->dtpv_next = dtrace_provider; 8643 dtrace_provider = provider; 8644 return (0); 8645 } 8646 8647 mutex_enter(&dtrace_provider_lock); 8648 mutex_enter(&dtrace_lock); 8649 8650 /* 8651 * If there is at least one provider registered, we'll add this 8652 * provider after the first provider. 
8653 */ 8654 if (dtrace_provider != NULL) { 8655 provider->dtpv_next = dtrace_provider->dtpv_next; 8656 dtrace_provider->dtpv_next = provider; 8657 } else { 8658 dtrace_provider = provider; 8659 } 8660 8661 if (dtrace_retained != NULL) { 8662 dtrace_enabling_provide(provider); 8663 8664 /* 8665 * Now we need to call dtrace_enabling_matchall() -- which 8666 * will acquire cpu_lock and dtrace_lock. We therefore need 8667 * to drop all of our locks before calling into it... 8668 */ 8669 mutex_exit(&dtrace_lock); 8670 mutex_exit(&dtrace_provider_lock); 8671 dtrace_enabling_matchall(); 8672 8673 return (0); 8674 } 8675 8676 mutex_exit(&dtrace_lock); 8677 mutex_exit(&dtrace_provider_lock); 8678 8679 return (0); 8680 } 8681 8682 /* 8683 * Unregister the specified provider from the DTrace framework. This should 8684 * generally be called by DTrace providers in their detach(9E) entry point. 8685 */ 8686 int 8687 dtrace_unregister(dtrace_provider_id_t id) 8688 { 8689 dtrace_provider_t *old = (dtrace_provider_t *)id; 8690 dtrace_provider_t *prev = NULL; 8691 int i, self = 0, noreap = 0; 8692 dtrace_probe_t *probe, *first = NULL; 8693 8694 if (old->dtpv_pops.dtps_enable == 8695 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8696 /* 8697 * If DTrace itself is the provider, we're called with locks 8698 * already held. 8699 */ 8700 ASSERT(old == dtrace_provider); 8701 #ifdef illumos 8702 ASSERT(dtrace_devi != NULL); 8703 #endif 8704 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8705 ASSERT(MUTEX_HELD(&dtrace_lock)); 8706 self = 1; 8707 8708 if (dtrace_provider->dtpv_next != NULL) { 8709 /* 8710 * There's another provider here; return failure. 8711 */ 8712 return (EBUSY); 8713 } 8714 } else { 8715 mutex_enter(&dtrace_provider_lock); 8716 #ifdef illumos 8717 mutex_enter(&mod_lock); 8718 #endif 8719 mutex_enter(&dtrace_lock); 8720 } 8721 8722 /* 8723 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8724 * probes, we refuse to let providers slither away, unless this 8725 * provider has already been explicitly invalidated. 8726 */ 8727 if (!old->dtpv_defunct && 8728 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8729 dtrace_anon.dta_state->dts_necbs > 0))) { 8730 if (!self) { 8731 mutex_exit(&dtrace_lock); 8732 #ifdef illumos 8733 mutex_exit(&mod_lock); 8734 #endif 8735 mutex_exit(&dtrace_provider_lock); 8736 } 8737 return (EBUSY); 8738 } 8739 8740 /* 8741 * Attempt to destroy the probes associated with this provider. 8742 */ 8743 for (i = 0; i < dtrace_nprobes; i++) { 8744 if ((probe = dtrace_probes[i]) == NULL) 8745 continue; 8746 8747 if (probe->dtpr_provider != old) 8748 continue; 8749 8750 if (probe->dtpr_ecb == NULL) 8751 continue; 8752 8753 /* 8754 * If we are trying to unregister a defunct provider, and the 8755 * provider was made defunct within the interval dictated by 8756 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8757 * attempt to reap our enablings. To denote that the provider 8758 * should reattempt to unregister itself at some point in the 8759 * future, we will return a differentiable error code (EAGAIN 8760 * instead of EBUSY) in this case. 
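 *
 * A caller can use the distinction to retry once the reap has had a
 * chance to run (a hedged sketch; the provider name and id are
 * hypothetical):
 *
 *	static int
 *	foo_detach(void)
 *	{
 *		int rv = dtrace_unregister(foo_id);
 *
 *		return (rv == EAGAIN ? EBUSY : rv);
 *	}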
8761 */ 8762 if (dtrace_gethrtime() - old->dtpv_defunct > 8763 dtrace_unregister_defunct_reap) 8764 noreap = 1; 8765 8766 if (!self) { 8767 mutex_exit(&dtrace_lock); 8768 #ifdef illumos 8769 mutex_exit(&mod_lock); 8770 #endif 8771 mutex_exit(&dtrace_provider_lock); 8772 } 8773 8774 if (noreap) 8775 return (EBUSY); 8776 8777 (void) taskq_dispatch(dtrace_taskq, 8778 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8779 8780 return (EAGAIN); 8781 } 8782 8783 /* 8784 * All of the probes for this provider are disabled; we can safely 8785 * remove all of them from their hash chains and from the probe array. 8786 */ 8787 for (i = 0; i < dtrace_nprobes; i++) { 8788 if ((probe = dtrace_probes[i]) == NULL) 8789 continue; 8790 8791 if (probe->dtpr_provider != old) 8792 continue; 8793 8794 dtrace_probes[i] = NULL; 8795 8796 dtrace_hash_remove(dtrace_bymod, probe); 8797 dtrace_hash_remove(dtrace_byfunc, probe); 8798 dtrace_hash_remove(dtrace_byname, probe); 8799 8800 if (first == NULL) { 8801 first = probe; 8802 probe->dtpr_nextmod = NULL; 8803 } else { 8804 probe->dtpr_nextmod = first; 8805 first = probe; 8806 } 8807 } 8808 8809 /* 8810 * The provider's probes have been removed from the hash chains and 8811 * from the probe array. Now issue a dtrace_sync() to be sure that 8812 * everyone has cleared out from any probe array processing. 8813 */ 8814 dtrace_sync(); 8815 8816 for (probe = first; probe != NULL; probe = first) { 8817 first = probe->dtpr_nextmod; 8818 8819 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8820 probe->dtpr_arg); 8821 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8822 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8823 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8824 #ifdef illumos 8825 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8826 #else 8827 free_unr(dtrace_arena, probe->dtpr_id); 8828 #endif 8829 kmem_free(probe, sizeof (dtrace_probe_t)); 8830 } 8831 8832 if ((prev = dtrace_provider) == old) { 8833 #ifdef illumos 8834 ASSERT(self || dtrace_devi == NULL); 8835 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8836 #endif 8837 dtrace_provider = old->dtpv_next; 8838 } else { 8839 while (prev != NULL && prev->dtpv_next != old) 8840 prev = prev->dtpv_next; 8841 8842 if (prev == NULL) { 8843 panic("attempt to unregister non-existent " 8844 "dtrace provider %p\n", (void *)id); 8845 } 8846 8847 prev->dtpv_next = old->dtpv_next; 8848 } 8849 8850 if (!self) { 8851 mutex_exit(&dtrace_lock); 8852 #ifdef illumos 8853 mutex_exit(&mod_lock); 8854 #endif 8855 mutex_exit(&dtrace_provider_lock); 8856 } 8857 8858 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 8859 kmem_free(old, sizeof (dtrace_provider_t)); 8860 8861 return (0); 8862 } 8863 8864 /* 8865 * Invalidate the specified provider. All subsequent probe lookups for the 8866 * specified provider will fail, but its probes will not be removed. 8867 */ 8868 void 8869 dtrace_invalidate(dtrace_provider_id_t id) 8870 { 8871 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 8872 8873 ASSERT(pvp->dtpv_pops.dtps_enable != 8874 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8875 8876 mutex_enter(&dtrace_provider_lock); 8877 mutex_enter(&dtrace_lock); 8878 8879 pvp->dtpv_defunct = dtrace_gethrtime(); 8880 8881 mutex_exit(&dtrace_lock); 8882 mutex_exit(&dtrace_provider_lock); 8883 } 8884 8885 /* 8886 * Indicate whether or not DTrace has attached. 
8887 */ 8888 int 8889 dtrace_attached(void) 8890 { 8891 /* 8892 * dtrace_provider will be non-NULL iff the DTrace driver has 8893 * attached. (It's non-NULL because DTrace is always itself a 8894 * provider.) 8895 */ 8896 return (dtrace_provider != NULL); 8897 } 8898 8899 /* 8900 * Remove all the unenabled probes for the given provider. This function is 8901 * not unlike dtrace_unregister(), except that it doesn't remove the provider 8902 * -- just as many of its associated probes as it can. 8903 */ 8904 int 8905 dtrace_condense(dtrace_provider_id_t id) 8906 { 8907 dtrace_provider_t *prov = (dtrace_provider_t *)id; 8908 int i; 8909 dtrace_probe_t *probe; 8910 8911 /* 8912 * Make sure this isn't the dtrace provider itself. 8913 */ 8914 ASSERT(prov->dtpv_pops.dtps_enable != 8915 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8916 8917 mutex_enter(&dtrace_provider_lock); 8918 mutex_enter(&dtrace_lock); 8919 8920 /* 8921 * Attempt to destroy the probes associated with this provider. 8922 */ 8923 for (i = 0; i < dtrace_nprobes; i++) { 8924 if ((probe = dtrace_probes[i]) == NULL) 8925 continue; 8926 8927 if (probe->dtpr_provider != prov) 8928 continue; 8929 8930 if (probe->dtpr_ecb != NULL) 8931 continue; 8932 8933 dtrace_probes[i] = NULL; 8934 8935 dtrace_hash_remove(dtrace_bymod, probe); 8936 dtrace_hash_remove(dtrace_byfunc, probe); 8937 dtrace_hash_remove(dtrace_byname, probe); 8938 8939 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 8940 probe->dtpr_arg); 8941 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8942 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8943 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8944 kmem_free(probe, sizeof (dtrace_probe_t)); 8945 #ifdef illumos 8946 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 8947 #else 8948 free_unr(dtrace_arena, i + 1); 8949 #endif 8950 } 8951 8952 mutex_exit(&dtrace_lock); 8953 mutex_exit(&dtrace_provider_lock); 8954 8955 return (0); 8956 } 8957 8958 /* 8959 * DTrace Probe Management Functions 8960 * 8961 * The functions in this section perform the DTrace probe management, 8962 * including functions to create probes, look-up probes, and call into the 8963 * providers to request that probes be provided. Some of these functions are 8964 * in the Provider-to-Framework API; these functions can be identified by the 8965 * fact that they are not declared "static". 8966 */ 8967 8968 /* 8969 * Create a probe with the specified module name, function name, and name. 
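 *
 * As an illustrative sketch (the provider name and id are hypothetical,
 * not part of this file), a provider typically creates probes from its
 * dtps_provide() entry point, using dtrace_probe_lookup() to avoid
 * creating duplicates on repeated provide callbacks:
 *
 *	static void
 *	foo_provide(void *arg, dtrace_probedesc_t *desc)
 *	{
 *		if (dtrace_probe_lookup(foo_id, NULL, "doit", "entry") == 0)
 *			(void) dtrace_probe_create(foo_id, NULL, "doit",
 *			    "entry", 0, NULL);
 *	}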
8970 */ 8971 dtrace_id_t 8972 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 8973 const char *func, const char *name, int aframes, void *arg) 8974 { 8975 dtrace_probe_t *probe, **probes; 8976 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 8977 dtrace_id_t id; 8978 8979 if (provider == dtrace_provider) { 8980 ASSERT(MUTEX_HELD(&dtrace_lock)); 8981 } else { 8982 mutex_enter(&dtrace_lock); 8983 } 8984 8985 #ifdef illumos 8986 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 8987 VM_BESTFIT | VM_SLEEP); 8988 #else 8989 id = alloc_unr(dtrace_arena); 8990 #endif 8991 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 8992 8993 probe->dtpr_id = id; 8994 probe->dtpr_gen = dtrace_probegen++; 8995 probe->dtpr_mod = dtrace_strdup(mod); 8996 probe->dtpr_func = dtrace_strdup(func); 8997 probe->dtpr_name = dtrace_strdup(name); 8998 probe->dtpr_arg = arg; 8999 probe->dtpr_aframes = aframes; 9000 probe->dtpr_provider = provider; 9001 9002 dtrace_hash_add(dtrace_bymod, probe); 9003 dtrace_hash_add(dtrace_byfunc, probe); 9004 dtrace_hash_add(dtrace_byname, probe); 9005 9006 if (id - 1 >= dtrace_nprobes) { 9007 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 9008 size_t nsize = osize << 1; 9009 9010 if (nsize == 0) { 9011 ASSERT(osize == 0); 9012 ASSERT(dtrace_probes == NULL); 9013 nsize = sizeof (dtrace_probe_t *); 9014 } 9015 9016 probes = kmem_zalloc(nsize, KM_SLEEP); 9017 9018 if (dtrace_probes == NULL) { 9019 ASSERT(osize == 0); 9020 dtrace_probes = probes; 9021 dtrace_nprobes = 1; 9022 } else { 9023 dtrace_probe_t **oprobes = dtrace_probes; 9024 9025 bcopy(oprobes, probes, osize); 9026 dtrace_membar_producer(); 9027 dtrace_probes = probes; 9028 9029 dtrace_sync(); 9030 9031 /* 9032 * All CPUs are now seeing the new probes array; we can 9033 * safely free the old array. 9034 */ 9035 kmem_free(oprobes, osize); 9036 dtrace_nprobes <<= 1; 9037 } 9038 9039 ASSERT(id - 1 < dtrace_nprobes); 9040 } 9041 9042 ASSERT(dtrace_probes[id - 1] == NULL); 9043 dtrace_probes[id - 1] = probe; 9044 9045 if (provider != dtrace_provider) 9046 mutex_exit(&dtrace_lock); 9047 9048 return (id); 9049 } 9050 9051 static dtrace_probe_t * 9052 dtrace_probe_lookup_id(dtrace_id_t id) 9053 { 9054 ASSERT(MUTEX_HELD(&dtrace_lock)); 9055 9056 if (id == 0 || id > dtrace_nprobes) 9057 return (NULL); 9058 9059 return (dtrace_probes[id - 1]); 9060 } 9061 9062 static int 9063 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 9064 { 9065 *((dtrace_id_t *)arg) = probe->dtpr_id; 9066 9067 return (DTRACE_MATCH_DONE); 9068 } 9069 9070 /* 9071 * Look up a probe based on provider and one or more of module name, function 9072 * name and probe name. 9073 */ 9074 dtrace_id_t 9075 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 9076 char *func, char *name) 9077 { 9078 dtrace_probekey_t pkey; 9079 dtrace_id_t id; 9080 int match; 9081 9082 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9083 pkey.dtpk_pmatch = &dtrace_match_string; 9084 pkey.dtpk_mod = mod; 9085 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9086 pkey.dtpk_func = func; 9087 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9088 pkey.dtpk_name = name; 9089 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 9090 pkey.dtpk_id = DTRACE_IDNONE; 9091 9092 mutex_enter(&dtrace_lock); 9093 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9094 dtrace_probe_lookup_match, &id); 9095 mutex_exit(&dtrace_lock); 9096 9097 ASSERT(match == 1 || match == 0); 9098 return (match ? id : 0); 9099 } 9100 9101 /* 9102 * Returns the probe argument associated with the specified probe. 9103 */ 9104 void * 9105 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 9106 { 9107 dtrace_probe_t *probe; 9108 void *rval = NULL; 9109 9110 mutex_enter(&dtrace_lock); 9111 9112 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 9113 probe->dtpr_provider == (dtrace_provider_t *)id) 9114 rval = probe->dtpr_arg; 9115 9116 mutex_exit(&dtrace_lock); 9117 9118 return (rval); 9119 } 9120 9121 /* 9122 * Copy a probe into a probe description. 9123 */ 9124 static void 9125 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 9126 { 9127 bzero(pdp, sizeof (dtrace_probedesc_t)); 9128 pdp->dtpd_id = prp->dtpr_id; 9129 9130 (void) strncpy(pdp->dtpd_provider, 9131 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 9132 9133 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 9134 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 9135 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 9136 } 9137 9138 /* 9139 * Called to indicate that a probe -- or probes -- should be provided by a 9140 * specified provider. If the specified description is NULL, the provider will 9141 * be told to provide all of its probes. (This is done whenever a new 9142 * consumer comes along, or whenever a retained enabling is to be matched.) If 9143 * the specified description is non-NULL, the provider is given the 9144 * opportunity to dynamically provide the specified probe, allowing providers 9145 * to support the creation of probes on-the-fly. (So-called _autocreated_ 9146 * probes.) If the provider is NULL, the operations will be applied to all 9147 * providers; if the provider is non-NULL the operations will only be applied 9148 * to the specified provider. The dtrace_provider_lock must be held, and the 9149 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 9150 * will need to grab the dtrace_lock when it reenters the framework through 9151 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 9152 */ 9153 static void 9154 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 9155 { 9156 #ifdef illumos 9157 modctl_t *ctl; 9158 #endif 9159 int all = 0; 9160 9161 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9162 9163 if (prv == NULL) { 9164 all = 1; 9165 prv = dtrace_provider; 9166 } 9167 9168 do { 9169 /* 9170 * First, call the blanket provide operation. 9171 */ 9172 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 9173 9174 #ifdef illumos 9175 /* 9176 * Now call the per-module provide operation. We will grab 9177 * mod_lock to prevent the list from being modified. Note 9178 * that this also prevents the mod_busy bits from changing. 9179 * (mod_busy can only be changed with mod_lock held.) 
9180 */ 9181 mutex_enter(&mod_lock); 9182 9183 ctl = &modules; 9184 do { 9185 if (ctl->mod_busy || ctl->mod_mp == NULL) 9186 continue; 9187 9188 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 9189 9190 } while ((ctl = ctl->mod_next) != &modules); 9191 9192 mutex_exit(&mod_lock); 9193 #endif 9194 } while (all && (prv = prv->dtpv_next) != NULL); 9195 } 9196 9197 #ifdef illumos 9198 /* 9199 * Iterate over each probe, and call the Framework-to-Provider API function 9200 * denoted by offs. 9201 */ 9202 static void 9203 dtrace_probe_foreach(uintptr_t offs) 9204 { 9205 dtrace_provider_t *prov; 9206 void (*func)(void *, dtrace_id_t, void *); 9207 dtrace_probe_t *probe; 9208 dtrace_icookie_t cookie; 9209 int i; 9210 9211 /* 9212 * We disable interrupts to walk through the probe array. This is 9213 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9214 * won't see stale data. 9215 */ 9216 cookie = dtrace_interrupt_disable(); 9217 9218 for (i = 0; i < dtrace_nprobes; i++) { 9219 if ((probe = dtrace_probes[i]) == NULL) 9220 continue; 9221 9222 if (probe->dtpr_ecb == NULL) { 9223 /* 9224 * This probe isn't enabled -- don't call the function. 9225 */ 9226 continue; 9227 } 9228 9229 prov = probe->dtpr_provider; 9230 func = *((void(**)(void *, dtrace_id_t, void *)) 9231 ((uintptr_t)&prov->dtpv_pops + offs)); 9232 9233 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9234 } 9235 9236 dtrace_interrupt_enable(cookie); 9237 } 9238 #endif 9239 9240 static int 9241 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9242 { 9243 dtrace_probekey_t pkey; 9244 uint32_t priv; 9245 uid_t uid; 9246 zoneid_t zoneid; 9247 9248 ASSERT(MUTEX_HELD(&dtrace_lock)); 9249 dtrace_ecb_create_cache = NULL; 9250 9251 if (desc == NULL) { 9252 /* 9253 * If we're passed a NULL description, we're being asked to 9254 * create an ECB with a NULL probe. 
9255 */ 9256 (void) dtrace_ecb_create_enable(NULL, enab); 9257 return (0); 9258 } 9259 9260 dtrace_probekey(desc, &pkey); 9261 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9262 &priv, &uid, &zoneid); 9263 9264 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9265 enab)); 9266 } 9267 9268 /* 9269 * DTrace Helper Provider Functions 9270 */ 9271 static void 9272 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9273 { 9274 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9275 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9276 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9277 } 9278 9279 static void 9280 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9281 const dof_provider_t *dofprov, char *strtab) 9282 { 9283 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9284 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9285 dofprov->dofpv_provattr); 9286 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9287 dofprov->dofpv_modattr); 9288 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9289 dofprov->dofpv_funcattr); 9290 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9291 dofprov->dofpv_nameattr); 9292 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9293 dofprov->dofpv_argsattr); 9294 } 9295 9296 static void 9297 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9298 { 9299 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9300 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9301 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9302 dof_provider_t *provider; 9303 dof_probe_t *probe; 9304 uint32_t *off, *enoff; 9305 uint8_t *arg; 9306 char *strtab; 9307 uint_t i, nprobes; 9308 dtrace_helper_provdesc_t dhpv; 9309 dtrace_helper_probedesc_t dhpb; 9310 dtrace_meta_t *meta = dtrace_meta_pid; 9311 dtrace_mops_t *mops = &meta->dtm_mops; 9312 void *parg; 9313 9314 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9315 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9316 provider->dofpv_strtab * dof->dofh_secsize); 9317 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9318 provider->dofpv_probes * dof->dofh_secsize); 9319 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9320 provider->dofpv_prargs * dof->dofh_secsize); 9321 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9322 provider->dofpv_proffs * dof->dofh_secsize); 9323 9324 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9325 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9326 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9327 enoff = NULL; 9328 9329 /* 9330 * See dtrace_helper_provider_validate(). 9331 */ 9332 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9333 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9334 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9335 provider->dofpv_prenoffs * dof->dofh_secsize); 9336 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9337 } 9338 9339 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9340 9341 /* 9342 * Create the provider. 9343 */ 9344 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9345 9346 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9347 return; 9348 9349 meta->dtm_count++; 9350 9351 /* 9352 * Create the probes. 9353 */ 9354 for (i = 0; i < nprobes; i++) { 9355 probe = (dof_probe_t *)(uintptr_t)(daddr + 9356 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9357 9358 /* See the check in dtrace_helper_provider_validate(). 
*/ 9359 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) 9360 continue; 9361 9362 dhpb.dthpb_mod = dhp->dofhp_mod; 9363 dhpb.dthpb_func = strtab + probe->dofpr_func; 9364 dhpb.dthpb_name = strtab + probe->dofpr_name; 9365 dhpb.dthpb_base = probe->dofpr_addr; 9366 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9367 dhpb.dthpb_noffs = probe->dofpr_noffs; 9368 if (enoff != NULL) { 9369 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9370 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9371 } else { 9372 dhpb.dthpb_enoffs = NULL; 9373 dhpb.dthpb_nenoffs = 0; 9374 } 9375 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9376 dhpb.dthpb_nargc = probe->dofpr_nargc; 9377 dhpb.dthpb_xargc = probe->dofpr_xargc; 9378 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9379 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9380 9381 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9382 } 9383 } 9384 9385 static void 9386 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9387 { 9388 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9389 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9390 int i; 9391 9392 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9393 9394 for (i = 0; i < dof->dofh_secnum; i++) { 9395 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9396 dof->dofh_secoff + i * dof->dofh_secsize); 9397 9398 if (sec->dofs_type != DOF_SECT_PROVIDER) 9399 continue; 9400 9401 dtrace_helper_provide_one(dhp, sec, pid); 9402 } 9403 9404 /* 9405 * We may have just created probes, so we must now rematch against 9406 * any retained enablings. Note that this call will acquire both 9407 * cpu_lock and dtrace_lock; the fact that we are holding 9408 * dtrace_meta_lock now is what defines the ordering with respect to 9409 * these three locks. 9410 */ 9411 dtrace_enabling_matchall(); 9412 } 9413 9414 static void 9415 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9416 { 9417 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9418 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9419 dof_sec_t *str_sec; 9420 dof_provider_t *provider; 9421 char *strtab; 9422 dtrace_helper_provdesc_t dhpv; 9423 dtrace_meta_t *meta = dtrace_meta_pid; 9424 dtrace_mops_t *mops = &meta->dtm_mops; 9425 9426 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9427 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9428 provider->dofpv_strtab * dof->dofh_secsize); 9429 9430 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9431 9432 /* 9433 * Create the provider. 9434 */ 9435 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9436 9437 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9438 9439 meta->dtm_count--; 9440 } 9441 9442 static void 9443 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9444 { 9445 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9446 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9447 int i; 9448 9449 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9450 9451 for (i = 0; i < dof->dofh_secnum; i++) { 9452 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9453 dof->dofh_secoff + i * dof->dofh_secsize); 9454 9455 if (sec->dofs_type != DOF_SECT_PROVIDER) 9456 continue; 9457 9458 dtrace_helper_provider_remove_one(dhp, sec, pid); 9459 } 9460 } 9461 9462 /* 9463 * DTrace Meta Provider-to-Framework API Functions 9464 * 9465 * These functions implement the Meta Provider-to-Framework API, as described 9466 * in <sys/dtrace.h>. 
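 *
 * A meta-provider registers by filling in a dtrace_mops_t and calling
 * dtrace_meta_register() (a hedged sketch -- the "foo" names are
 * hypothetical, and the initializer assumes the dtms_create_probe,
 * dtms_provide_pid, dtms_remove_pid member order declared in
 * <sys/dtrace.h>):
 *
 *	static dtrace_mops_t foo_mops = {
 *		foo_create_probe,
 *		foo_provide_pid,
 *		foo_remove_pid
 *	};
 *
 *	error = dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id);
 *
 * Only one user-land meta-provider may be registered at a time; any
 * helper providers deferred before registration are handed off to the
 * new meta-provider by dtrace_meta_register() itself.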
*/ 9468 int 9469 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9470 dtrace_meta_provider_id_t *idp) 9471 { 9472 dtrace_meta_t *meta; 9473 dtrace_helpers_t *help, *next; 9474 int i; 9475 9476 *idp = DTRACE_METAPROVNONE; 9477 9478 /* 9479 * We strictly don't need the name, but we hold onto it for 9480 * debuggability. All hail error queues! 9481 */ 9482 if (name == NULL) { 9483 cmn_err(CE_WARN, "failed to register meta-provider: " 9484 "invalid name"); 9485 return (EINVAL); 9486 } 9487 9488 if (mops == NULL || 9489 mops->dtms_create_probe == NULL || 9490 mops->dtms_provide_pid == NULL || 9491 mops->dtms_remove_pid == NULL) { 9492 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9493 "invalid ops", name); 9494 return (EINVAL); 9495 } 9496 9497 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9498 meta->dtm_mops = *mops; 9499 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9500 (void) strcpy(meta->dtm_name, name); 9501 meta->dtm_arg = arg; 9502 9503 mutex_enter(&dtrace_meta_lock); 9504 mutex_enter(&dtrace_lock); 9505 9506 if (dtrace_meta_pid != NULL) { 9507 mutex_exit(&dtrace_lock); 9508 mutex_exit(&dtrace_meta_lock); 9509 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9510 "user-land meta-provider exists", name); 9511 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9512 kmem_free(meta, sizeof (dtrace_meta_t)); 9513 return (EINVAL); 9514 } 9515 9516 dtrace_meta_pid = meta; 9517 *idp = (dtrace_meta_provider_id_t)meta; 9518 9519 /* 9520 * If there are providers and probes ready to go, pass them 9521 * off to the new meta provider now. 9522 */ 9523 9524 help = dtrace_deferred_pid; 9525 dtrace_deferred_pid = NULL; 9526 9527 mutex_exit(&dtrace_lock); 9528 9529 while (help != NULL) { 9530 for (i = 0; i < help->dthps_nprovs; i++) { 9531 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9532 help->dthps_pid); 9533 } 9534 9535 next = help->dthps_next; 9536 help->dthps_next = NULL; 9537 help->dthps_prev = NULL; 9538 help->dthps_deferred = 0; 9539 help = next; 9540 } 9541 9542 mutex_exit(&dtrace_meta_lock); 9543 9544 return (0); 9545 } 9546 9547 int 9548 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9549 { 9550 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9551 9552 mutex_enter(&dtrace_meta_lock); 9553 mutex_enter(&dtrace_lock); 9554 9555 if (old == dtrace_meta_pid) { 9556 pp = &dtrace_meta_pid; 9557 } else { 9558 panic("attempt to unregister non-existent " 9559 "dtrace meta-provider %p\n", (void *)old); 9560 } 9561 9562 if (old->dtm_count != 0) { 9563 mutex_exit(&dtrace_lock); 9564 mutex_exit(&dtrace_meta_lock); 9565 return (EBUSY); 9566 } 9567 9568 *pp = NULL; 9569 9570 mutex_exit(&dtrace_lock); 9571 mutex_exit(&dtrace_meta_lock); 9572 9573 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9574 kmem_free(old, sizeof (dtrace_meta_t)); 9575 9576 return (0); 9577 } 9578 9579 9580 /* 9581 * DTrace DIF Object Functions 9582 */ 9583 static int 9584 dtrace_difo_err(uint_t pc, const char *format, ...) 9585 { 9586 if (dtrace_err_verbose) { 9587 va_list alist; 9588 9589 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9590 va_start(alist, format); 9591 (void) vuprintf(format, alist); 9592 va_end(alist); 9593 } 9594 9595 #ifdef DTRACE_ERRDEBUG 9596 dtrace_errdebug(format); 9597 #endif 9598 return (1); 9599 } 9600 9601 /* 9602 * Validate a DTrace DIF object by checking the IR instructions. The following 9603 * rules are currently enforced by dtrace_difo_validate(): 9604 * 9605 * 1. 
Each instruction must have a valid opcode 9606 * 2. Each register, string, variable, or subroutine reference must be valid 9607 * 3. No instruction can modify register %r0 (must be zero) 9608 * 4. All instruction reserved bits must be set to zero 9609 * 5. The last instruction must be a "ret" instruction 9610 * 6. All branch targets must reference a valid instruction _after_ the branch 9611 */ 9612 static int 9613 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9614 cred_t *cr) 9615 { 9616 int err = 0, i; 9617 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 9618 int kcheckload; 9619 uint_t pc; 9620 int maxglobal = -1, maxlocal = -1, maxtlocal = -1; 9621 9622 kcheckload = cr == NULL || 9623 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9624 9625 dp->dtdo_destructive = 0; 9626 9627 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9628 dif_instr_t instr = dp->dtdo_buf[pc]; 9629 9630 uint_t r1 = DIF_INSTR_R1(instr); 9631 uint_t r2 = DIF_INSTR_R2(instr); 9632 uint_t rd = DIF_INSTR_RD(instr); 9633 uint_t rs = DIF_INSTR_RS(instr); 9634 uint_t label = DIF_INSTR_LABEL(instr); 9635 uint_t v = DIF_INSTR_VAR(instr); 9636 uint_t subr = DIF_INSTR_SUBR(instr); 9637 uint_t type = DIF_INSTR_TYPE(instr); 9638 uint_t op = DIF_INSTR_OP(instr); 9639 9640 switch (op) { 9641 case DIF_OP_OR: 9642 case DIF_OP_XOR: 9643 case DIF_OP_AND: 9644 case DIF_OP_SLL: 9645 case DIF_OP_SRL: 9646 case DIF_OP_SRA: 9647 case DIF_OP_SUB: 9648 case DIF_OP_ADD: 9649 case DIF_OP_MUL: 9650 case DIF_OP_SDIV: 9651 case DIF_OP_UDIV: 9652 case DIF_OP_SREM: 9653 case DIF_OP_UREM: 9654 case DIF_OP_COPYS: 9655 if (r1 >= nregs) 9656 err += efunc(pc, "invalid register %u\n", r1); 9657 if (r2 >= nregs) 9658 err += efunc(pc, "invalid register %u\n", r2); 9659 if (rd >= nregs) 9660 err += efunc(pc, "invalid register %u\n", rd); 9661 if (rd == 0) 9662 err += efunc(pc, "cannot write to %r0\n"); 9663 break; 9664 case DIF_OP_NOT: 9665 case DIF_OP_MOV: 9666 case DIF_OP_ALLOCS: 9667 if (r1 >= nregs) 9668 err += efunc(pc, "invalid register %u\n", r1); 9669 if (r2 != 0) 9670 err += efunc(pc, "non-zero reserved bits\n"); 9671 if (rd >= nregs) 9672 err += efunc(pc, "invalid register %u\n", rd); 9673 if (rd == 0) 9674 err += efunc(pc, "cannot write to %r0\n"); 9675 break; 9676 case DIF_OP_LDSB: 9677 case DIF_OP_LDSH: 9678 case DIF_OP_LDSW: 9679 case DIF_OP_LDUB: 9680 case DIF_OP_LDUH: 9681 case DIF_OP_LDUW: 9682 case DIF_OP_LDX: 9683 if (r1 >= nregs) 9684 err += efunc(pc, "invalid register %u\n", r1); 9685 if (r2 != 0) 9686 err += efunc(pc, "non-zero reserved bits\n"); 9687 if (rd >= nregs) 9688 err += efunc(pc, "invalid register %u\n", rd); 9689 if (rd == 0) 9690 err += efunc(pc, "cannot write to %r0\n"); 9691 if (kcheckload) 9692 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9693 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9694 break; 9695 case DIF_OP_RLDSB: 9696 case DIF_OP_RLDSH: 9697 case DIF_OP_RLDSW: 9698 case DIF_OP_RLDUB: 9699 case DIF_OP_RLDUH: 9700 case DIF_OP_RLDUW: 9701 case DIF_OP_RLDX: 9702 if (r1 >= nregs) 9703 err += efunc(pc, "invalid register %u\n", r1); 9704 if (r2 != 0) 9705 err += efunc(pc, "non-zero reserved bits\n"); 9706 if (rd >= nregs) 9707 err += efunc(pc, "invalid register %u\n", rd); 9708 if (rd == 0) 9709 err += efunc(pc, "cannot write to %r0\n"); 9710 break; 9711 case DIF_OP_ULDSB: 9712 case DIF_OP_ULDSH: 9713 case DIF_OP_ULDSW: 9714 case DIF_OP_ULDUB: 9715 case DIF_OP_ULDUH: 9716 case DIF_OP_ULDUW: 9717 case DIF_OP_ULDX: 9718 if (r1 >= nregs) 9719 err += efunc(pc, 
"invalid register %u\n", r1); 9720 if (r2 != 0) 9721 err += efunc(pc, "non-zero reserved bits\n"); 9722 if (rd >= nregs) 9723 err += efunc(pc, "invalid register %u\n", rd); 9724 if (rd == 0) 9725 err += efunc(pc, "cannot write to %r0\n"); 9726 break; 9727 case DIF_OP_STB: 9728 case DIF_OP_STH: 9729 case DIF_OP_STW: 9730 case DIF_OP_STX: 9731 if (r1 >= nregs) 9732 err += efunc(pc, "invalid register %u\n", r1); 9733 if (r2 != 0) 9734 err += efunc(pc, "non-zero reserved bits\n"); 9735 if (rd >= nregs) 9736 err += efunc(pc, "invalid register %u\n", rd); 9737 if (rd == 0) 9738 err += efunc(pc, "cannot write to 0 address\n"); 9739 break; 9740 case DIF_OP_CMP: 9741 case DIF_OP_SCMP: 9742 if (r1 >= nregs) 9743 err += efunc(pc, "invalid register %u\n", r1); 9744 if (r2 >= nregs) 9745 err += efunc(pc, "invalid register %u\n", r2); 9746 if (rd != 0) 9747 err += efunc(pc, "non-zero reserved bits\n"); 9748 break; 9749 case DIF_OP_TST: 9750 if (r1 >= nregs) 9751 err += efunc(pc, "invalid register %u\n", r1); 9752 if (r2 != 0 || rd != 0) 9753 err += efunc(pc, "non-zero reserved bits\n"); 9754 break; 9755 case DIF_OP_BA: 9756 case DIF_OP_BE: 9757 case DIF_OP_BNE: 9758 case DIF_OP_BG: 9759 case DIF_OP_BGU: 9760 case DIF_OP_BGE: 9761 case DIF_OP_BGEU: 9762 case DIF_OP_BL: 9763 case DIF_OP_BLU: 9764 case DIF_OP_BLE: 9765 case DIF_OP_BLEU: 9766 if (label >= dp->dtdo_len) { 9767 err += efunc(pc, "invalid branch target %u\n", 9768 label); 9769 } 9770 if (label <= pc) { 9771 err += efunc(pc, "backward branch to %u\n", 9772 label); 9773 } 9774 break; 9775 case DIF_OP_RET: 9776 if (r1 != 0 || r2 != 0) 9777 err += efunc(pc, "non-zero reserved bits\n"); 9778 if (rd >= nregs) 9779 err += efunc(pc, "invalid register %u\n", rd); 9780 break; 9781 case DIF_OP_NOP: 9782 case DIF_OP_POPTS: 9783 case DIF_OP_FLUSHTS: 9784 if (r1 != 0 || r2 != 0 || rd != 0) 9785 err += efunc(pc, "non-zero reserved bits\n"); 9786 break; 9787 case DIF_OP_SETX: 9788 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9789 err += efunc(pc, "invalid integer ref %u\n", 9790 DIF_INSTR_INTEGER(instr)); 9791 } 9792 if (rd >= nregs) 9793 err += efunc(pc, "invalid register %u\n", rd); 9794 if (rd == 0) 9795 err += efunc(pc, "cannot write to %r0\n"); 9796 break; 9797 case DIF_OP_SETS: 9798 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9799 err += efunc(pc, "invalid string ref %u\n", 9800 DIF_INSTR_STRING(instr)); 9801 } 9802 if (rd >= nregs) 9803 err += efunc(pc, "invalid register %u\n", rd); 9804 if (rd == 0) 9805 err += efunc(pc, "cannot write to %r0\n"); 9806 break; 9807 case DIF_OP_LDGA: 9808 case DIF_OP_LDTA: 9809 if (r1 > DIF_VAR_ARRAY_MAX) 9810 err += efunc(pc, "invalid array %u\n", r1); 9811 if (r2 >= nregs) 9812 err += efunc(pc, "invalid register %u\n", r2); 9813 if (rd >= nregs) 9814 err += efunc(pc, "invalid register %u\n", rd); 9815 if (rd == 0) 9816 err += efunc(pc, "cannot write to %r0\n"); 9817 break; 9818 case DIF_OP_LDGS: 9819 case DIF_OP_LDTS: 9820 case DIF_OP_LDLS: 9821 case DIF_OP_LDGAA: 9822 case DIF_OP_LDTAA: 9823 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9824 err += efunc(pc, "invalid variable %u\n", v); 9825 if (rd >= nregs) 9826 err += efunc(pc, "invalid register %u\n", rd); 9827 if (rd == 0) 9828 err += efunc(pc, "cannot write to %r0\n"); 9829 break; 9830 case DIF_OP_STGS: 9831 case DIF_OP_STTS: 9832 case DIF_OP_STLS: 9833 case DIF_OP_STGAA: 9834 case DIF_OP_STTAA: 9835 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9836 err += efunc(pc, "invalid variable %u\n", v); 9837 if (rs >= nregs) 9838 err += efunc(pc, 
"invalid register %u\n", rd); 9839 break; 9840 case DIF_OP_CALL: 9841 if (subr > DIF_SUBR_MAX) 9842 err += efunc(pc, "invalid subr %u\n", subr); 9843 if (rd >= nregs) 9844 err += efunc(pc, "invalid register %u\n", rd); 9845 if (rd == 0) 9846 err += efunc(pc, "cannot write to %r0\n"); 9847 9848 if (subr == DIF_SUBR_COPYOUT || 9849 subr == DIF_SUBR_COPYOUTSTR) { 9850 dp->dtdo_destructive = 1; 9851 } 9852 9853 if (subr == DIF_SUBR_GETF) { 9854 /* 9855 * If we have a getf() we need to record that 9856 * in our state. Note that our state can be 9857 * NULL if this is a helper -- but in that 9858 * case, the call to getf() is itself illegal, 9859 * and will be caught (slightly later) when 9860 * the helper is validated. 9861 */ 9862 if (vstate->dtvs_state != NULL) 9863 vstate->dtvs_state->dts_getf++; 9864 } 9865 9866 break; 9867 case DIF_OP_PUSHTR: 9868 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 9869 err += efunc(pc, "invalid ref type %u\n", type); 9870 if (r2 >= nregs) 9871 err += efunc(pc, "invalid register %u\n", r2); 9872 if (rs >= nregs) 9873 err += efunc(pc, "invalid register %u\n", rs); 9874 break; 9875 case DIF_OP_PUSHTV: 9876 if (type != DIF_TYPE_CTF) 9877 err += efunc(pc, "invalid val type %u\n", type); 9878 if (r2 >= nregs) 9879 err += efunc(pc, "invalid register %u\n", r2); 9880 if (rs >= nregs) 9881 err += efunc(pc, "invalid register %u\n", rs); 9882 break; 9883 default: 9884 err += efunc(pc, "invalid opcode %u\n", 9885 DIF_INSTR_OP(instr)); 9886 } 9887 } 9888 9889 if (dp->dtdo_len != 0 && 9890 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 9891 err += efunc(dp->dtdo_len - 1, 9892 "expected 'ret' as last DIF instruction\n"); 9893 } 9894 9895 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 9896 /* 9897 * If we're not returning by reference, the size must be either 9898 * 0 or the size of one of the base types. 9899 */ 9900 switch (dp->dtdo_rtype.dtdt_size) { 9901 case 0: 9902 case sizeof (uint8_t): 9903 case sizeof (uint16_t): 9904 case sizeof (uint32_t): 9905 case sizeof (uint64_t): 9906 break; 9907 9908 default: 9909 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 9910 } 9911 } 9912 9913 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 9914 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 9915 dtrace_diftype_t *vt, *et; 9916 uint_t id, ndx; 9917 9918 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 9919 v->dtdv_scope != DIFV_SCOPE_THREAD && 9920 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 9921 err += efunc(i, "unrecognized variable scope %d\n", 9922 v->dtdv_scope); 9923 break; 9924 } 9925 9926 if (v->dtdv_kind != DIFV_KIND_ARRAY && 9927 v->dtdv_kind != DIFV_KIND_SCALAR) { 9928 err += efunc(i, "unrecognized variable type %d\n", 9929 v->dtdv_kind); 9930 break; 9931 } 9932 9933 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 9934 err += efunc(i, "%d exceeds variable id limit\n", id); 9935 break; 9936 } 9937 9938 if (id < DIF_VAR_OTHER_UBASE) 9939 continue; 9940 9941 /* 9942 * For user-defined variables, we need to check that this 9943 * definition is identical to any previous definition that we 9944 * encountered. 
9945 */ 9946 ndx = id - DIF_VAR_OTHER_UBASE; 9947 9948 switch (v->dtdv_scope) { 9949 case DIFV_SCOPE_GLOBAL: 9950 if (maxglobal == -1 || ndx > maxglobal) 9951 maxglobal = ndx; 9952 9953 if (ndx < vstate->dtvs_nglobals) { 9954 dtrace_statvar_t *svar; 9955 9956 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 9957 existing = &svar->dtsv_var; 9958 } 9959 9960 break; 9961 9962 case DIFV_SCOPE_THREAD: 9963 if (maxtlocal == -1 || ndx > maxtlocal) 9964 maxtlocal = ndx; 9965 9966 if (ndx < vstate->dtvs_ntlocals) 9967 existing = &vstate->dtvs_tlocals[ndx]; 9968 break; 9969 9970 case DIFV_SCOPE_LOCAL: 9971 if (maxlocal == -1 || ndx > maxlocal) 9972 maxlocal = ndx; 9973 9974 if (ndx < vstate->dtvs_nlocals) { 9975 dtrace_statvar_t *svar; 9976 9977 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 9978 existing = &svar->dtsv_var; 9979 } 9980 9981 break; 9982 } 9983 9984 vt = &v->dtdv_type; 9985 9986 if (vt->dtdt_flags & DIF_TF_BYREF) { 9987 if (vt->dtdt_size == 0) { 9988 err += efunc(i, "zero-sized variable\n"); 9989 break; 9990 } 9991 9992 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL || 9993 v->dtdv_scope == DIFV_SCOPE_LOCAL) && 9994 vt->dtdt_size > dtrace_statvar_maxsize) { 9995 err += efunc(i, "oversized by-ref static\n"); 9996 break; 9997 } 9998 } 9999 10000 if (existing == NULL || existing->dtdv_id == 0) 10001 continue; 10002 10003 ASSERT(existing->dtdv_id == v->dtdv_id); 10004 ASSERT(existing->dtdv_scope == v->dtdv_scope); 10005 10006 if (existing->dtdv_kind != v->dtdv_kind) 10007 err += efunc(i, "%d changed variable kind\n", id); 10008 10009 et = &existing->dtdv_type; 10010 10011 if (vt->dtdt_flags != et->dtdt_flags) { 10012 err += efunc(i, "%d changed variable type flags\n", id); 10013 break; 10014 } 10015 10016 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 10017 err += efunc(i, "%d changed variable type size\n", id); 10018 break; 10019 } 10020 } 10021 10022 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 10023 dif_instr_t instr = dp->dtdo_buf[pc]; 10024 10025 uint_t v = DIF_INSTR_VAR(instr); 10026 uint_t op = DIF_INSTR_OP(instr); 10027 10028 switch (op) { 10029 case DIF_OP_LDGS: 10030 case DIF_OP_LDGAA: 10031 case DIF_OP_STGS: 10032 case DIF_OP_STGAA: 10033 if (v > DIF_VAR_OTHER_UBASE + maxglobal) 10034 err += efunc(pc, "invalid variable %u\n", v); 10035 break; 10036 case DIF_OP_LDTS: 10037 case DIF_OP_LDTAA: 10038 case DIF_OP_STTS: 10039 case DIF_OP_STTAA: 10040 if (v > DIF_VAR_OTHER_UBASE + maxtlocal) 10041 err += efunc(pc, "invalid variable %u\n", v); 10042 break; 10043 case DIF_OP_LDLS: 10044 case DIF_OP_STLS: 10045 if (v > DIF_VAR_OTHER_UBASE + maxlocal) 10046 err += efunc(pc, "invalid variable %u\n", v); 10047 break; 10048 default: 10049 break; 10050 } 10051 } 10052 10053 return (err); 10054 } 10055 10056 /* 10057 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 10058 * are much more constrained than normal DIFOs. Specifically, they may 10059 * not: 10060 * 10061 * 1. Make calls to subroutines other than copyin(), copyinstr() or 10062 * miscellaneous string routines 10063 * 2. Access DTrace variables other than the args[] array, and the 10064 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 10065 * 3. Have thread-local variables. 10066 * 4. Have dynamic variables. 10067 */ 10068 static int 10069 dtrace_difo_validate_helper(dtrace_difo_t *dp) 10070 { 10071 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 10072 int err = 0; 10073 uint_t pc; 10074 10075 for (pc = 0; pc < dp->dtdo_len; pc++) { 10076 dif_instr_t instr = dp->dtdo_buf[pc]; 10077 10078 uint_t v = DIF_INSTR_VAR(instr); 10079 uint_t subr = DIF_INSTR_SUBR(instr); 10080 uint_t op = DIF_INSTR_OP(instr); 10081 10082 switch (op) { 10083 case DIF_OP_OR: 10084 case DIF_OP_XOR: 10085 case DIF_OP_AND: 10086 case DIF_OP_SLL: 10087 case DIF_OP_SRL: 10088 case DIF_OP_SRA: 10089 case DIF_OP_SUB: 10090 case DIF_OP_ADD: 10091 case DIF_OP_MUL: 10092 case DIF_OP_SDIV: 10093 case DIF_OP_UDIV: 10094 case DIF_OP_SREM: 10095 case DIF_OP_UREM: 10096 case DIF_OP_COPYS: 10097 case DIF_OP_NOT: 10098 case DIF_OP_MOV: 10099 case DIF_OP_RLDSB: 10100 case DIF_OP_RLDSH: 10101 case DIF_OP_RLDSW: 10102 case DIF_OP_RLDUB: 10103 case DIF_OP_RLDUH: 10104 case DIF_OP_RLDUW: 10105 case DIF_OP_RLDX: 10106 case DIF_OP_ULDSB: 10107 case DIF_OP_ULDSH: 10108 case DIF_OP_ULDSW: 10109 case DIF_OP_ULDUB: 10110 case DIF_OP_ULDUH: 10111 case DIF_OP_ULDUW: 10112 case DIF_OP_ULDX: 10113 case DIF_OP_STB: 10114 case DIF_OP_STH: 10115 case DIF_OP_STW: 10116 case DIF_OP_STX: 10117 case DIF_OP_ALLOCS: 10118 case DIF_OP_CMP: 10119 case DIF_OP_SCMP: 10120 case DIF_OP_TST: 10121 case DIF_OP_BA: 10122 case DIF_OP_BE: 10123 case DIF_OP_BNE: 10124 case DIF_OP_BG: 10125 case DIF_OP_BGU: 10126 case DIF_OP_BGE: 10127 case DIF_OP_BGEU: 10128 case DIF_OP_BL: 10129 case DIF_OP_BLU: 10130 case DIF_OP_BLE: 10131 case DIF_OP_BLEU: 10132 case DIF_OP_RET: 10133 case DIF_OP_NOP: 10134 case DIF_OP_POPTS: 10135 case DIF_OP_FLUSHTS: 10136 case DIF_OP_SETX: 10137 case DIF_OP_SETS: 10138 case DIF_OP_LDGA: 10139 case DIF_OP_LDLS: 10140 case DIF_OP_STGS: 10141 case DIF_OP_STLS: 10142 case DIF_OP_PUSHTR: 10143 case DIF_OP_PUSHTV: 10144 break; 10145 10146 case DIF_OP_LDGS: 10147 if (v >= DIF_VAR_OTHER_UBASE) 10148 break; 10149 10150 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10151 break; 10152 10153 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10154 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10155 v == DIF_VAR_EXECARGS || 10156 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10157 v == DIF_VAR_UID || v == DIF_VAR_GID) 10158 break; 10159 10160 err += efunc(pc, "illegal variable %u\n", v); 10161 break; 10162 10163 case DIF_OP_LDTA: 10164 case DIF_OP_LDTS: 10165 case DIF_OP_LDGAA: 10166 case DIF_OP_LDTAA: 10167 err += efunc(pc, "illegal dynamic variable load\n"); 10168 break; 10169 10170 case DIF_OP_STTS: 10171 case DIF_OP_STGAA: 10172 case DIF_OP_STTAA: 10173 err += efunc(pc, "illegal dynamic variable store\n"); 10174 break; 10175 10176 case DIF_OP_CALL: 10177 if (subr == DIF_SUBR_ALLOCA || 10178 subr == DIF_SUBR_BCOPY || 10179 subr == DIF_SUBR_COPYIN || 10180 subr == DIF_SUBR_COPYINTO || 10181 subr == DIF_SUBR_COPYINSTR || 10182 subr == DIF_SUBR_INDEX || 10183 subr == DIF_SUBR_INET_NTOA || 10184 subr == DIF_SUBR_INET_NTOA6 || 10185 subr == DIF_SUBR_INET_NTOP || 10186 subr == DIF_SUBR_JSON || 10187 subr == DIF_SUBR_LLTOSTR || 10188 subr == DIF_SUBR_STRTOLL || 10189 subr == DIF_SUBR_RINDEX || 10190 subr == DIF_SUBR_STRCHR || 10191 subr == DIF_SUBR_STRJOIN || 10192 subr == DIF_SUBR_STRRCHR || 10193 subr == DIF_SUBR_STRSTR || 10194 subr == DIF_SUBR_HTONS || 10195 subr == DIF_SUBR_HTONL || 10196 subr == DIF_SUBR_HTONLL || 10197 subr == DIF_SUBR_NTOHS || 10198 subr == DIF_SUBR_NTOHL || 10199 subr == DIF_SUBR_NTOHLL || 10200 subr == DIF_SUBR_MEMREF || 10201 #ifndef illumos 10202 subr == DIF_SUBR_MEMSTR || 10203 #endif 10204 subr == DIF_SUBR_TYPEREF) 10205 break; 10206 10207 err += efunc(pc, 
"invalid subr %u\n", subr); 10208 break; 10209 10210 default: 10211 err += efunc(pc, "invalid opcode %u\n", 10212 DIF_INSTR_OP(instr)); 10213 } 10214 } 10215 10216 return (err); 10217 } 10218 10219 /* 10220 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10221 * basis; 0 if not. 10222 */ 10223 static int 10224 dtrace_difo_cacheable(dtrace_difo_t *dp) 10225 { 10226 int i; 10227 10228 if (dp == NULL) 10229 return (0); 10230 10231 for (i = 0; i < dp->dtdo_varlen; i++) { 10232 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10233 10234 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10235 continue; 10236 10237 switch (v->dtdv_id) { 10238 case DIF_VAR_CURTHREAD: 10239 case DIF_VAR_PID: 10240 case DIF_VAR_TID: 10241 case DIF_VAR_EXECARGS: 10242 case DIF_VAR_EXECNAME: 10243 case DIF_VAR_ZONENAME: 10244 break; 10245 10246 default: 10247 return (0); 10248 } 10249 } 10250 10251 /* 10252 * This DIF object may be cacheable. Now we need to look for any 10253 * array loading instructions, any memory loading instructions, or 10254 * any stores to thread-local variables. 10255 */ 10256 for (i = 0; i < dp->dtdo_len; i++) { 10257 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10258 10259 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10260 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10261 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10262 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10263 return (0); 10264 } 10265 10266 return (1); 10267 } 10268 10269 static void 10270 dtrace_difo_hold(dtrace_difo_t *dp) 10271 { 10272 int i; 10273 10274 ASSERT(MUTEX_HELD(&dtrace_lock)); 10275 10276 dp->dtdo_refcnt++; 10277 ASSERT(dp->dtdo_refcnt != 0); 10278 10279 /* 10280 * We need to check this DIF object for references to the variable 10281 * DIF_VAR_VTIMESTAMP. 10282 */ 10283 for (i = 0; i < dp->dtdo_varlen; i++) { 10284 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10285 10286 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10287 continue; 10288 10289 if (dtrace_vtime_references++ == 0) 10290 dtrace_vtime_enable(); 10291 } 10292 } 10293 10294 /* 10295 * This routine calculates the dynamic variable chunksize for a given DIF 10296 * object. The calculation is not fool-proof, and can probably be tricked by 10297 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10298 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10299 * if a dynamic variable size exceeds the chunksize. 
10300 */ 10301 static void 10302 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10303 { 10304 uint64_t sval = 0; 10305 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10306 const dif_instr_t *text = dp->dtdo_buf; 10307 uint_t pc, srd = 0; 10308 uint_t ttop = 0; 10309 size_t size, ksize; 10310 uint_t id, i; 10311 10312 for (pc = 0; pc < dp->dtdo_len; pc++) { 10313 dif_instr_t instr = text[pc]; 10314 uint_t op = DIF_INSTR_OP(instr); 10315 uint_t rd = DIF_INSTR_RD(instr); 10316 uint_t r1 = DIF_INSTR_R1(instr); 10317 uint_t nkeys = 0; 10318 uchar_t scope = 0; 10319 10320 dtrace_key_t *key = tupregs; 10321 10322 switch (op) { 10323 case DIF_OP_SETX: 10324 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10325 srd = rd; 10326 continue; 10327 10328 case DIF_OP_STTS: 10329 key = &tupregs[DIF_DTR_NREGS]; 10330 key[0].dttk_size = 0; 10331 key[1].dttk_size = 0; 10332 nkeys = 2; 10333 scope = DIFV_SCOPE_THREAD; 10334 break; 10335 10336 case DIF_OP_STGAA: 10337 case DIF_OP_STTAA: 10338 nkeys = ttop; 10339 10340 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10341 key[nkeys++].dttk_size = 0; 10342 10343 key[nkeys++].dttk_size = 0; 10344 10345 if (op == DIF_OP_STTAA) { 10346 scope = DIFV_SCOPE_THREAD; 10347 } else { 10348 scope = DIFV_SCOPE_GLOBAL; 10349 } 10350 10351 break; 10352 10353 case DIF_OP_PUSHTR: 10354 if (ttop == DIF_DTR_NREGS) 10355 return; 10356 10357 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10358 /* 10359 * If the register for the size of the "pushtr" 10360 * is %r0 (or the value is 0) and the type is 10361 * a string, we'll use the system-wide default 10362 * string size. 10363 */ 10364 tupregs[ttop++].dttk_size = 10365 dtrace_strsize_default; 10366 } else { 10367 if (srd == 0) 10368 return; 10369 10370 if (sval > LONG_MAX) 10371 return; 10372 10373 tupregs[ttop++].dttk_size = sval; 10374 } 10375 10376 break; 10377 10378 case DIF_OP_PUSHTV: 10379 if (ttop == DIF_DTR_NREGS) 10380 return; 10381 10382 tupregs[ttop++].dttk_size = 0; 10383 break; 10384 10385 case DIF_OP_FLUSHTS: 10386 ttop = 0; 10387 break; 10388 10389 case DIF_OP_POPTS: 10390 if (ttop != 0) 10391 ttop--; 10392 break; 10393 } 10394 10395 sval = 0; 10396 srd = 0; 10397 10398 if (nkeys == 0) 10399 continue; 10400 10401 /* 10402 * We have a dynamic variable allocation; calculate its size. 10403 */ 10404 for (ksize = 0, i = 0; i < nkeys; i++) 10405 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10406 10407 size = sizeof (dtrace_dynvar_t); 10408 size += sizeof (dtrace_key_t) * (nkeys - 1); 10409 size += ksize; 10410 10411 /* 10412 * Now we need to determine the size of the stored data. 10413 */ 10414 id = DIF_INSTR_VAR(instr); 10415 10416 for (i = 0; i < dp->dtdo_varlen; i++) { 10417 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10418 10419 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10420 size += v->dtdv_type.dtdt_size; 10421 break; 10422 } 10423 } 10424 10425 if (i == dp->dtdo_varlen) 10426 return; 10427 10428 /* 10429 * We have the size. If this is larger than the chunk size 10430 * for our dynamic variable state, reset the chunk size. 10431 */ 10432 size = P2ROUNDUP(size, sizeof (uint64_t)); 10433 10434 /* 10435 * Before setting the chunk size, check that we're not going 10436 * to set it to a negative value... 10437 */ 10438 if (size > LONG_MAX) 10439 return; 10440 10441 /* 10442 * ...and make certain that we didn't badly overflow. 
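 * (Because size is an unsigned size_t, "overflow" would manifest as
 * wrap-around in the additions above: a wrapped value will be smaller
 * than ksize, or smaller than the fixed dtrace_dynvar_t header it must
 * contain, which is exactly what the check below catches.)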
10443 */ 10444 if (size < ksize || size < sizeof (dtrace_dynvar_t)) 10445 return; 10446 10447 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10448 vstate->dtvs_dynvars.dtds_chunksize = size; 10449 } 10450 } 10451 10452 static void 10453 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10454 { 10455 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10456 uint_t id; 10457 10458 ASSERT(MUTEX_HELD(&dtrace_lock)); 10459 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10460 10461 for (i = 0; i < dp->dtdo_varlen; i++) { 10462 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10463 dtrace_statvar_t *svar, ***svarp = NULL; 10464 size_t dsize = 0; 10465 uint8_t scope = v->dtdv_scope; 10466 int *np = NULL; 10467 10468 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10469 continue; 10470 10471 id -= DIF_VAR_OTHER_UBASE; 10472 10473 switch (scope) { 10474 case DIFV_SCOPE_THREAD: 10475 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10476 dtrace_difv_t *tlocals; 10477 10478 if ((ntlocals = (otlocals << 1)) == 0) 10479 ntlocals = 1; 10480 10481 osz = otlocals * sizeof (dtrace_difv_t); 10482 nsz = ntlocals * sizeof (dtrace_difv_t); 10483 10484 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10485 10486 if (osz != 0) { 10487 bcopy(vstate->dtvs_tlocals, 10488 tlocals, osz); 10489 kmem_free(vstate->dtvs_tlocals, osz); 10490 } 10491 10492 vstate->dtvs_tlocals = tlocals; 10493 vstate->dtvs_ntlocals = ntlocals; 10494 } 10495 10496 vstate->dtvs_tlocals[id] = *v; 10497 continue; 10498 10499 case DIFV_SCOPE_LOCAL: 10500 np = &vstate->dtvs_nlocals; 10501 svarp = &vstate->dtvs_locals; 10502 10503 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10504 dsize = NCPU * (v->dtdv_type.dtdt_size + 10505 sizeof (uint64_t)); 10506 else 10507 dsize = NCPU * sizeof (uint64_t); 10508 10509 break; 10510 10511 case DIFV_SCOPE_GLOBAL: 10512 np = &vstate->dtvs_nglobals; 10513 svarp = &vstate->dtvs_globals; 10514 10515 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10516 dsize = v->dtdv_type.dtdt_size + 10517 sizeof (uint64_t); 10518 10519 break; 10520 10521 default: 10522 ASSERT(0); 10523 } 10524 10525 while (id >= (oldsvars = *np)) { 10526 dtrace_statvar_t **statics; 10527 int newsvars, oldsize, newsize; 10528 10529 if ((newsvars = (oldsvars << 1)) == 0) 10530 newsvars = 1; 10531 10532 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10533 newsize = newsvars * sizeof (dtrace_statvar_t *); 10534 10535 statics = kmem_zalloc(newsize, KM_SLEEP); 10536 10537 if (oldsize != 0) { 10538 bcopy(*svarp, statics, oldsize); 10539 kmem_free(*svarp, oldsize); 10540 } 10541 10542 *svarp = statics; 10543 *np = newsvars; 10544 } 10545 10546 if ((svar = (*svarp)[id]) == NULL) { 10547 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10548 svar->dtsv_var = *v; 10549 10550 if ((svar->dtsv_size = dsize) != 0) { 10551 svar->dtsv_data = (uint64_t)(uintptr_t) 10552 kmem_zalloc(dsize, KM_SLEEP); 10553 } 10554 10555 (*svarp)[id] = svar; 10556 } 10557 10558 svar->dtsv_refcnt++; 10559 } 10560 10561 dtrace_difo_chunksize(dp, vstate); 10562 dtrace_difo_hold(dp); 10563 } 10564 10565 static dtrace_difo_t * 10566 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10567 { 10568 dtrace_difo_t *new; 10569 size_t sz; 10570 10571 ASSERT(dp->dtdo_buf != NULL); 10572 ASSERT(dp->dtdo_refcnt != 0); 10573 10574 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10575 10576 ASSERT(dp->dtdo_buf != NULL); 10577 sz = dp->dtdo_len * sizeof (dif_instr_t); 10578 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10579 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10580 new->dtdo_len = 
dp->dtdo_len; 10581 10582 if (dp->dtdo_strtab != NULL) { 10583 ASSERT(dp->dtdo_strlen != 0); 10584 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10585 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10586 new->dtdo_strlen = dp->dtdo_strlen; 10587 } 10588 10589 if (dp->dtdo_inttab != NULL) { 10590 ASSERT(dp->dtdo_intlen != 0); 10591 sz = dp->dtdo_intlen * sizeof (uint64_t); 10592 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10593 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10594 new->dtdo_intlen = dp->dtdo_intlen; 10595 } 10596 10597 if (dp->dtdo_vartab != NULL) { 10598 ASSERT(dp->dtdo_varlen != 0); 10599 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10600 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10601 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10602 new->dtdo_varlen = dp->dtdo_varlen; 10603 } 10604 10605 dtrace_difo_init(new, vstate); 10606 return (new); 10607 } 10608 10609 static void 10610 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10611 { 10612 int i; 10613 10614 ASSERT(dp->dtdo_refcnt == 0); 10615 10616 for (i = 0; i < dp->dtdo_varlen; i++) { 10617 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10618 dtrace_statvar_t *svar, **svarp = NULL; 10619 uint_t id; 10620 uint8_t scope = v->dtdv_scope; 10621 int *np = NULL; 10622 10623 switch (scope) { 10624 case DIFV_SCOPE_THREAD: 10625 continue; 10626 10627 case DIFV_SCOPE_LOCAL: 10628 np = &vstate->dtvs_nlocals; 10629 svarp = vstate->dtvs_locals; 10630 break; 10631 10632 case DIFV_SCOPE_GLOBAL: 10633 np = &vstate->dtvs_nglobals; 10634 svarp = vstate->dtvs_globals; 10635 break; 10636 10637 default: 10638 ASSERT(0); 10639 } 10640 10641 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10642 continue; 10643 10644 id -= DIF_VAR_OTHER_UBASE; 10645 ASSERT(id < *np); 10646 10647 svar = svarp[id]; 10648 ASSERT(svar != NULL); 10649 ASSERT(svar->dtsv_refcnt > 0); 10650 10651 if (--svar->dtsv_refcnt > 0) 10652 continue; 10653 10654 if (svar->dtsv_size != 0) { 10655 ASSERT(svar->dtsv_data != 0); 10656 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10657 svar->dtsv_size); 10658 } 10659 10660 kmem_free(svar, sizeof (dtrace_statvar_t)); 10661 svarp[id] = NULL; 10662 } 10663 10664 if (dp->dtdo_buf != NULL) 10665 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10666 if (dp->dtdo_inttab != NULL) 10667 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10668 if (dp->dtdo_strtab != NULL) 10669 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10670 if (dp->dtdo_vartab != NULL) 10671 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10672 10673 kmem_free(dp, sizeof (dtrace_difo_t)); 10674 } 10675 10676 static void 10677 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10678 { 10679 int i; 10680 10681 ASSERT(MUTEX_HELD(&dtrace_lock)); 10682 ASSERT(dp->dtdo_refcnt != 0); 10683 10684 for (i = 0; i < dp->dtdo_varlen; i++) { 10685 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10686 10687 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10688 continue; 10689 10690 ASSERT(dtrace_vtime_references > 0); 10691 if (--dtrace_vtime_references == 0) 10692 dtrace_vtime_disable(); 10693 } 10694 10695 if (--dp->dtdo_refcnt == 0) 10696 dtrace_difo_destroy(dp, vstate); 10697 } 10698 10699 /* 10700 * DTrace Format Functions 10701 */ 10702 static uint16_t 10703 dtrace_format_add(dtrace_state_t *state, char *str) 10704 { 10705 char *fmt, **new; 10706 uint16_t ndx, len = strlen(str) + 1; 10707 10708 fmt = kmem_zalloc(len, KM_SLEEP); 10709 bcopy(str, fmt, len); 10710 10711 for (ndx = 0; ndx < 
state->dts_nformats; ndx++) { 10712 if (state->dts_formats[ndx] == NULL) { 10713 state->dts_formats[ndx] = fmt; 10714 return (ndx + 1); 10715 } 10716 } 10717 10718 if (state->dts_nformats == USHRT_MAX) { 10719 /* 10720 * This is only likely if a denial-of-service attack is being 10721 * attempted. As such, it's okay to fail silently here. 10722 */ 10723 kmem_free(fmt, len); 10724 return (0); 10725 } 10726 10727 /* 10728 * For simplicity, we always resize the formats array to be exactly the 10729 * number of formats. 10730 */ 10731 ndx = state->dts_nformats++; 10732 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10733 10734 if (state->dts_formats != NULL) { 10735 ASSERT(ndx != 0); 10736 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10737 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10738 } 10739 10740 state->dts_formats = new; 10741 state->dts_formats[ndx] = fmt; 10742 10743 return (ndx + 1); 10744 } 10745 10746 static void 10747 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10748 { 10749 char *fmt; 10750 10751 ASSERT(state->dts_formats != NULL); 10752 ASSERT(format <= state->dts_nformats); 10753 ASSERT(state->dts_formats[format - 1] != NULL); 10754 10755 fmt = state->dts_formats[format - 1]; 10756 kmem_free(fmt, strlen(fmt) + 1); 10757 state->dts_formats[format - 1] = NULL; 10758 } 10759 10760 static void 10761 dtrace_format_destroy(dtrace_state_t *state) 10762 { 10763 int i; 10764 10765 if (state->dts_nformats == 0) { 10766 ASSERT(state->dts_formats == NULL); 10767 return; 10768 } 10769 10770 ASSERT(state->dts_formats != NULL); 10771 10772 for (i = 0; i < state->dts_nformats; i++) { 10773 char *fmt = state->dts_formats[i]; 10774 10775 if (fmt == NULL) 10776 continue; 10777 10778 kmem_free(fmt, strlen(fmt) + 1); 10779 } 10780 10781 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10782 state->dts_nformats = 0; 10783 state->dts_formats = NULL; 10784 } 10785 10786 /* 10787 * DTrace Predicate Functions 10788 */ 10789 static dtrace_predicate_t * 10790 dtrace_predicate_create(dtrace_difo_t *dp) 10791 { 10792 dtrace_predicate_t *pred; 10793 10794 ASSERT(MUTEX_HELD(&dtrace_lock)); 10795 ASSERT(dp->dtdo_refcnt != 0); 10796 10797 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10798 pred->dtp_difo = dp; 10799 pred->dtp_refcnt = 1; 10800 10801 if (!dtrace_difo_cacheable(dp)) 10802 return (pred); 10803 10804 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10805 /* 10806 * This is only theoretically possible -- we have had 2^32 10807 * cacheable predicates on this machine. We cannot allow any 10808 * more predicates to become cacheable: as unlikely as it is, 10809 * there may be a thread caching a (now stale) predicate cache 10810 * ID. 
(N.B.: the temptation is being successfully resisted to 10811 * have this cmn_err() "Holy shit -- we executed this code!") 10812 */ 10813 return (pred); 10814 } 10815 10816 pred->dtp_cacheid = dtrace_predcache_id++; 10817 10818 return (pred); 10819 } 10820 10821 static void 10822 dtrace_predicate_hold(dtrace_predicate_t *pred) 10823 { 10824 ASSERT(MUTEX_HELD(&dtrace_lock)); 10825 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10826 ASSERT(pred->dtp_refcnt > 0); 10827 10828 pred->dtp_refcnt++; 10829 } 10830 10831 static void 10832 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10833 { 10834 dtrace_difo_t *dp = pred->dtp_difo; 10835 10836 ASSERT(MUTEX_HELD(&dtrace_lock)); 10837 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10838 ASSERT(pred->dtp_refcnt > 0); 10839 10840 if (--pred->dtp_refcnt == 0) { 10841 dtrace_difo_release(pred->dtp_difo, vstate); 10842 kmem_free(pred, sizeof (dtrace_predicate_t)); 10843 } 10844 } 10845 10846 /* 10847 * DTrace Action Description Functions 10848 */ 10849 static dtrace_actdesc_t * 10850 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 10851 uint64_t uarg, uint64_t arg) 10852 { 10853 dtrace_actdesc_t *act; 10854 10855 #ifdef illumos 10856 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 10857 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 10858 #endif 10859 10860 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 10861 act->dtad_kind = kind; 10862 act->dtad_ntuple = ntuple; 10863 act->dtad_uarg = uarg; 10864 act->dtad_arg = arg; 10865 act->dtad_refcnt = 1; 10866 10867 return (act); 10868 } 10869 10870 static void 10871 dtrace_actdesc_hold(dtrace_actdesc_t *act) 10872 { 10873 ASSERT(act->dtad_refcnt >= 1); 10874 act->dtad_refcnt++; 10875 } 10876 10877 static void 10878 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 10879 { 10880 dtrace_actkind_t kind = act->dtad_kind; 10881 dtrace_difo_t *dp; 10882 10883 ASSERT(act->dtad_refcnt >= 1); 10884 10885 if (--act->dtad_refcnt != 0) 10886 return; 10887 10888 if ((dp = act->dtad_difo) != NULL) 10889 dtrace_difo_release(dp, vstate); 10890 10891 if (DTRACEACT_ISPRINTFLIKE(kind)) { 10892 char *str = (char *)(uintptr_t)act->dtad_arg; 10893 10894 #ifdef illumos 10895 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 10896 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 10897 #endif 10898 10899 if (str != NULL) 10900 kmem_free(str, strlen(str) + 1); 10901 } 10902 10903 kmem_free(act, sizeof (dtrace_actdesc_t)); 10904 } 10905 10906 /* 10907 * DTrace ECB Functions 10908 */ 10909 static dtrace_ecb_t * 10910 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 10911 { 10912 dtrace_ecb_t *ecb; 10913 dtrace_epid_t epid; 10914 10915 ASSERT(MUTEX_HELD(&dtrace_lock)); 10916 10917 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 10918 ecb->dte_predicate = NULL; 10919 ecb->dte_probe = probe; 10920 10921 /* 10922 * The default size is the size of the default action: recording 10923 * the header. 
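 * That is, sizeof (dtrace_rechdr_t) -- the EPID and timestamp stored at
 * the head of every record; dtrace_ecb_resize() below recomputes the
 * size and alignment as actions are added.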
10924 */ 10925 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 10926 ecb->dte_alignment = sizeof (dtrace_epid_t); 10927 10928 epid = state->dts_epid++; 10929 10930 if (epid - 1 >= state->dts_necbs) { 10931 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 10932 int necbs = state->dts_necbs << 1; 10933 10934 ASSERT(epid == state->dts_necbs + 1); 10935 10936 if (necbs == 0) { 10937 ASSERT(oecbs == NULL); 10938 necbs = 1; 10939 } 10940 10941 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 10942 10943 if (oecbs != NULL) 10944 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 10945 10946 dtrace_membar_producer(); 10947 state->dts_ecbs = ecbs; 10948 10949 if (oecbs != NULL) { 10950 /* 10951 * If this state is active, we must dtrace_sync() 10952 * before we can free the old dts_ecbs array: we're 10953 * coming in hot, and there may be active ring 10954 * buffer processing (which indexes into the dts_ecbs 10955 * array) on another CPU. 10956 */ 10957 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 10958 dtrace_sync(); 10959 10960 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 10961 } 10962 10963 dtrace_membar_producer(); 10964 state->dts_necbs = necbs; 10965 } 10966 10967 ecb->dte_state = state; 10968 10969 ASSERT(state->dts_ecbs[epid - 1] == NULL); 10970 dtrace_membar_producer(); 10971 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 10972 10973 return (ecb); 10974 } 10975 10976 static void 10977 dtrace_ecb_enable(dtrace_ecb_t *ecb) 10978 { 10979 dtrace_probe_t *probe = ecb->dte_probe; 10980 10981 ASSERT(MUTEX_HELD(&cpu_lock)); 10982 ASSERT(MUTEX_HELD(&dtrace_lock)); 10983 ASSERT(ecb->dte_next == NULL); 10984 10985 if (probe == NULL) { 10986 /* 10987 * This is the NULL probe -- there's nothing to do. 10988 */ 10989 return; 10990 } 10991 10992 if (probe->dtpr_ecb == NULL) { 10993 dtrace_provider_t *prov = probe->dtpr_provider; 10994 10995 /* 10996 * We're the first ECB on this probe. 10997 */ 10998 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 10999 11000 if (ecb->dte_predicate != NULL) 11001 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 11002 11003 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 11004 probe->dtpr_id, probe->dtpr_arg); 11005 } else { 11006 /* 11007 * This probe is already active. Swing the last pointer to 11008 * point to the new ECB, and issue a dtrace_sync() to assure 11009 * that all CPUs have seen the change. 11010 */ 11011 ASSERT(probe->dtpr_ecb_last != NULL); 11012 probe->dtpr_ecb_last->dte_next = ecb; 11013 probe->dtpr_ecb_last = ecb; 11014 probe->dtpr_predcache = 0; 11015 11016 dtrace_sync(); 11017 } 11018 } 11019 11020 static void 11021 dtrace_ecb_resize(dtrace_ecb_t *ecb) 11022 { 11023 dtrace_action_t *act; 11024 uint32_t curneeded = UINT32_MAX; 11025 uint32_t aggbase = UINT32_MAX; 11026 11027 /* 11028 * If we record anything, we always record the dtrace_rechdr_t. (And 11029 * we always record it first.) 
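 *
 * As a rough illustration, a non-aggregating ECB with two data-storing
 * actions lays out each per-firing record as follows (a sketch; the
 * exact offsets come from the alignment logic below):
 *
 *	+-------------------+----------------+----------------+
 *	| dtrace_rechdr_t   | action 0 data  | action 1 data  |
 *	| (EPID, timestamp) | @ dtrd_offset  | @ dtrd_offset  |
 *	+-------------------+----------------+----------------+
 *
 * with padding inserted as needed to honor each record's
 * dtrd_alignment, and with dte_size tracking the running total.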
11030 */ 11031 ecb->dte_size = sizeof (dtrace_rechdr_t); 11032 ecb->dte_alignment = sizeof (dtrace_epid_t); 11033 11034 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11035 dtrace_recdesc_t *rec = &act->dta_rec; 11036 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 11037 11038 ecb->dte_alignment = MAX(ecb->dte_alignment, 11039 rec->dtrd_alignment); 11040 11041 if (DTRACEACT_ISAGG(act->dta_kind)) { 11042 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11043 11044 ASSERT(rec->dtrd_size != 0); 11045 ASSERT(agg->dtag_first != NULL); 11046 ASSERT(act->dta_prev->dta_intuple); 11047 ASSERT(aggbase != UINT32_MAX); 11048 ASSERT(curneeded != UINT32_MAX); 11049 11050 agg->dtag_base = aggbase; 11051 11052 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11053 rec->dtrd_offset = curneeded; 11054 curneeded += rec->dtrd_size; 11055 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 11056 11057 aggbase = UINT32_MAX; 11058 curneeded = UINT32_MAX; 11059 } else if (act->dta_intuple) { 11060 if (curneeded == UINT32_MAX) { 11061 /* 11062 * This is the first record in a tuple. Align 11063 * curneeded to be at offset 4 in an 8-byte 11064 * aligned block. 11065 */ 11066 ASSERT(act->dta_prev == NULL || 11067 !act->dta_prev->dta_intuple); 11068 ASSERT3U(aggbase, ==, UINT32_MAX); 11069 curneeded = P2PHASEUP(ecb->dte_size, 11070 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 11071 11072 aggbase = curneeded - sizeof (dtrace_aggid_t); 11073 ASSERT(IS_P2ALIGNED(aggbase, 11074 sizeof (uint64_t))); 11075 } 11076 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 11077 rec->dtrd_offset = curneeded; 11078 curneeded += rec->dtrd_size; 11079 } else { 11080 /* tuples must be followed by an aggregation */ 11081 ASSERT(act->dta_prev == NULL || 11082 !act->dta_prev->dta_intuple); 11083 11084 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 11085 rec->dtrd_alignment); 11086 rec->dtrd_offset = ecb->dte_size; 11087 ecb->dte_size += rec->dtrd_size; 11088 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 11089 } 11090 } 11091 11092 if ((act = ecb->dte_action) != NULL && 11093 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 11094 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 11095 /* 11096 * If the size is still sizeof (dtrace_rechdr_t), then all 11097 * actions store no data; set the size to 0. 
11098 */ 11099 ecb->dte_size = 0; 11100 } 11101 11102 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 11103 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 11104 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 11105 ecb->dte_needed); 11106 } 11107 11108 static dtrace_action_t * 11109 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11110 { 11111 dtrace_aggregation_t *agg; 11112 size_t size = sizeof (uint64_t); 11113 int ntuple = desc->dtad_ntuple; 11114 dtrace_action_t *act; 11115 dtrace_recdesc_t *frec; 11116 dtrace_aggid_t aggid; 11117 dtrace_state_t *state = ecb->dte_state; 11118 11119 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 11120 agg->dtag_ecb = ecb; 11121 11122 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 11123 11124 switch (desc->dtad_kind) { 11125 case DTRACEAGG_MIN: 11126 agg->dtag_initial = INT64_MAX; 11127 agg->dtag_aggregate = dtrace_aggregate_min; 11128 break; 11129 11130 case DTRACEAGG_MAX: 11131 agg->dtag_initial = INT64_MIN; 11132 agg->dtag_aggregate = dtrace_aggregate_max; 11133 break; 11134 11135 case DTRACEAGG_COUNT: 11136 agg->dtag_aggregate = dtrace_aggregate_count; 11137 break; 11138 11139 case DTRACEAGG_QUANTIZE: 11140 agg->dtag_aggregate = dtrace_aggregate_quantize; 11141 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11142 sizeof (uint64_t); 11143 break; 11144 11145 case DTRACEAGG_LQUANTIZE: { 11146 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11147 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11148 11149 agg->dtag_initial = desc->dtad_arg; 11150 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11151 11152 if (step == 0 || levels == 0) 11153 goto err; 11154 11155 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11156 break; 11157 } 11158 11159 case DTRACEAGG_LLQUANTIZE: { 11160 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11161 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11162 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11163 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11164 int64_t v; 11165 11166 agg->dtag_initial = desc->dtad_arg; 11167 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11168 11169 if (factor < 2 || low >= high || nsteps < factor) 11170 goto err; 11171 11172 /* 11173 * Now check that the number of steps evenly divides a power 11174 * of the factor. (This assures both integer bucket size and 11175 * linearity within each magnitude.) 11176 */ 11177 for (v = factor; v < nsteps; v *= factor) 11178 continue; 11179 11180 if ((v % nsteps) || (nsteps % factor)) 11181 goto err; 11182 11183 size = (dtrace_aggregate_llquantize_bucket(factor, 11184 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11185 break; 11186 } 11187 11188 case DTRACEAGG_AVG: 11189 agg->dtag_aggregate = dtrace_aggregate_avg; 11190 size = sizeof (uint64_t) * 2; 11191 break; 11192 11193 case DTRACEAGG_STDDEV: 11194 agg->dtag_aggregate = dtrace_aggregate_stddev; 11195 size = sizeof (uint64_t) * 4; 11196 break; 11197 11198 case DTRACEAGG_SUM: 11199 agg->dtag_aggregate = dtrace_aggregate_sum; 11200 break; 11201 11202 default: 11203 goto err; 11204 } 11205 11206 agg->dtag_action.dta_rec.dtrd_size = size; 11207 11208 if (ntuple == 0) 11209 goto err; 11210 11211 /* 11212 * We must make sure that we have enough actions for the n-tuple. 
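 * The loop below therefore walks backward from the last action: for an
 * aggregation keyed on a 2-tuple, for example, the two immediately
 * preceding non-aggregating actions must be the tuple's key
 * expressions, and dtag_first is pointed at the earlier of the two.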
11213 */ 11214 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11215 if (DTRACEACT_ISAGG(act->dta_kind)) 11216 break; 11217 11218 if (--ntuple == 0) { 11219 /* 11220 * This is the action with which our n-tuple begins. 11221 */ 11222 agg->dtag_first = act; 11223 goto success; 11224 } 11225 } 11226 11227 /* 11228 * This n-tuple is short by ntuple elements. Return failure. 11229 */ 11230 ASSERT(ntuple != 0); 11231 err: 11232 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11233 return (NULL); 11234 11235 success: 11236 /* 11237 * If the last action in the tuple has a size of zero, it's actually 11238 * an expression argument for the aggregating action. 11239 */ 11240 ASSERT(ecb->dte_action_last != NULL); 11241 act = ecb->dte_action_last; 11242 11243 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11244 ASSERT(act->dta_difo != NULL); 11245 11246 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11247 agg->dtag_hasarg = 1; 11248 } 11249 11250 /* 11251 * We need to allocate an id for this aggregation. 11252 */ 11253 #ifdef illumos 11254 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11255 VM_BESTFIT | VM_SLEEP); 11256 #else 11257 aggid = alloc_unr(state->dts_aggid_arena); 11258 #endif 11259 11260 if (aggid - 1 >= state->dts_naggregations) { 11261 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11262 dtrace_aggregation_t **aggs; 11263 int naggs = state->dts_naggregations << 1; 11264 int onaggs = state->dts_naggregations; 11265 11266 ASSERT(aggid == state->dts_naggregations + 1); 11267 11268 if (naggs == 0) { 11269 ASSERT(oaggs == NULL); 11270 naggs = 1; 11271 } 11272 11273 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11274 11275 if (oaggs != NULL) { 11276 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11277 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11278 } 11279 11280 state->dts_aggregations = aggs; 11281 state->dts_naggregations = naggs; 11282 } 11283 11284 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11285 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11286 11287 frec = &agg->dtag_first->dta_rec; 11288 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11289 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11290 11291 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11292 ASSERT(!act->dta_intuple); 11293 act->dta_intuple = 1; 11294 } 11295 11296 return (&agg->dtag_action); 11297 } 11298 11299 static void 11300 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11301 { 11302 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11303 dtrace_state_t *state = ecb->dte_state; 11304 dtrace_aggid_t aggid = agg->dtag_id; 11305 11306 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11307 #ifdef illumos 11308 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11309 #else 11310 free_unr(state->dts_aggid_arena, aggid); 11311 #endif 11312 11313 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11314 state->dts_aggregations[aggid - 1] = NULL; 11315 11316 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11317 } 11318 11319 static int 11320 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11321 { 11322 dtrace_action_t *action, *last; 11323 dtrace_difo_t *dp = desc->dtad_difo; 11324 uint32_t size = 0, align = sizeof (uint8_t), mask; 11325 uint16_t format = 0; 11326 dtrace_recdesc_t *rec; 11327 dtrace_state_t *state = ecb->dte_state; 11328 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11329 uint64_t arg = desc->dtad_arg; 11330 11331 
ASSERT(MUTEX_HELD(&dtrace_lock)); 11332 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11333 11334 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11335 /* 11336 * If this is an aggregating action, there must be neither 11337 * a speculate nor a commit on the action chain. 11338 */ 11339 dtrace_action_t *act; 11340 11341 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11342 if (act->dta_kind == DTRACEACT_COMMIT) 11343 return (EINVAL); 11344 11345 if (act->dta_kind == DTRACEACT_SPECULATE) 11346 return (EINVAL); 11347 } 11348 11349 action = dtrace_ecb_aggregation_create(ecb, desc); 11350 11351 if (action == NULL) 11352 return (EINVAL); 11353 } else { 11354 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11355 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11356 dp != NULL && dp->dtdo_destructive)) { 11357 state->dts_destructive = 1; 11358 } 11359 11360 switch (desc->dtad_kind) { 11361 case DTRACEACT_PRINTF: 11362 case DTRACEACT_PRINTA: 11363 case DTRACEACT_SYSTEM: 11364 case DTRACEACT_FREOPEN: 11365 case DTRACEACT_DIFEXPR: 11366 /* 11367 * We know that our arg is a string -- turn it into a 11368 * format. 11369 */ 11370 if (arg == 0) { 11371 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11372 desc->dtad_kind == DTRACEACT_DIFEXPR); 11373 format = 0; 11374 } else { 11375 ASSERT(arg != 0); 11376 #ifdef illumos 11377 ASSERT(arg > KERNELBASE); 11378 #endif 11379 format = dtrace_format_add(state, 11380 (char *)(uintptr_t)arg); 11381 } 11382 11383 /*FALLTHROUGH*/ 11384 case DTRACEACT_LIBACT: 11385 case DTRACEACT_TRACEMEM: 11386 case DTRACEACT_TRACEMEM_DYNSIZE: 11387 if (dp == NULL) 11388 return (EINVAL); 11389 11390 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11391 break; 11392 11393 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11394 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11395 return (EINVAL); 11396 11397 size = opt[DTRACEOPT_STRSIZE]; 11398 } 11399 11400 break; 11401 11402 case DTRACEACT_STACK: 11403 if ((nframes = arg) == 0) { 11404 nframes = opt[DTRACEOPT_STACKFRAMES]; 11405 ASSERT(nframes > 0); 11406 arg = nframes; 11407 } 11408 11409 size = nframes * sizeof (pc_t); 11410 break; 11411 11412 case DTRACEACT_JSTACK: 11413 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11414 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11415 11416 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11417 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11418 11419 arg = DTRACE_USTACK_ARG(nframes, strsize); 11420 11421 /*FALLTHROUGH*/ 11422 case DTRACEACT_USTACK: 11423 if (desc->dtad_kind != DTRACEACT_JSTACK && 11424 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11425 strsize = DTRACE_USTACK_STRSIZE(arg); 11426 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11427 ASSERT(nframes > 0); 11428 arg = DTRACE_USTACK_ARG(nframes, strsize); 11429 } 11430 11431 /* 11432 * Save a slot for the pid. 
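 * The record sized below is thus laid out roughly as:
 *
 *	[ pid ][ pc 0 ][ pc 1 ] ... [ pc nframes-1 ][ string space ]
 *
 * that is, (nframes + 1) 64-bit slots plus DTRACE_USTACK_STRSIZE(arg)
 * bytes of string space, rounded up to pointer-size alignment.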
11433 */ 11434 size = (nframes + 1) * sizeof (uint64_t); 11435 size += DTRACE_USTACK_STRSIZE(arg); 11436 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 11437 11438 break; 11439 11440 case DTRACEACT_SYM: 11441 case DTRACEACT_MOD: 11442 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 11443 sizeof (uint64_t)) || 11444 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11445 return (EINVAL); 11446 break; 11447 11448 case DTRACEACT_USYM: 11449 case DTRACEACT_UMOD: 11450 case DTRACEACT_UADDR: 11451 if (dp == NULL || 11452 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 11453 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11454 return (EINVAL); 11455 11456 /* 11457 * We have a slot for the pid, plus a slot for the 11458 * argument. To keep things simple (aligned with 11459 * bitness-neutral sizing), we store each as a 64-bit 11460 * quantity. 11461 */ 11462 size = 2 * sizeof (uint64_t); 11463 break; 11464 11465 case DTRACEACT_STOP: 11466 case DTRACEACT_BREAKPOINT: 11467 case DTRACEACT_PANIC: 11468 break; 11469 11470 case DTRACEACT_CHILL: 11471 case DTRACEACT_DISCARD: 11472 case DTRACEACT_RAISE: 11473 if (dp == NULL) 11474 return (EINVAL); 11475 break; 11476 11477 case DTRACEACT_EXIT: 11478 if (dp == NULL || 11479 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 11480 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11481 return (EINVAL); 11482 break; 11483 11484 case DTRACEACT_SPECULATE: 11485 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 11486 return (EINVAL); 11487 11488 if (dp == NULL) 11489 return (EINVAL); 11490 11491 state->dts_speculates = 1; 11492 break; 11493 11494 case DTRACEACT_PRINTM: 11495 size = dp->dtdo_rtype.dtdt_size; 11496 break; 11497 11498 case DTRACEACT_PRINTT: 11499 size = dp->dtdo_rtype.dtdt_size; 11500 break; 11501 11502 case DTRACEACT_COMMIT: { 11503 dtrace_action_t *act = ecb->dte_action; 11504 11505 for (; act != NULL; act = act->dta_next) { 11506 if (act->dta_kind == DTRACEACT_COMMIT) 11507 return (EINVAL); 11508 } 11509 11510 if (dp == NULL) 11511 return (EINVAL); 11512 break; 11513 } 11514 11515 default: 11516 return (EINVAL); 11517 } 11518 11519 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 11520 /* 11521 * If this is a data-storing action or a speculate, 11522 * we must be sure that there isn't a commit on the 11523 * action chain. 
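 * (This mirrors the documented speculation constraint that a clause
 * which commit()s may not also record data; and, as checked in the
 * DTRACEACT_COMMIT case above, an ECB may contain at most one commit.)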
11524 */ 11525 dtrace_action_t *act = ecb->dte_action; 11526 11527 for (; act != NULL; act = act->dta_next) { 11528 if (act->dta_kind == DTRACEACT_COMMIT) 11529 return (EINVAL); 11530 } 11531 } 11532 11533 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11534 action->dta_rec.dtrd_size = size; 11535 } 11536 11537 action->dta_refcnt = 1; 11538 rec = &action->dta_rec; 11539 size = rec->dtrd_size; 11540 11541 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11542 if (!(size & mask)) { 11543 align = mask + 1; 11544 break; 11545 } 11546 } 11547 11548 action->dta_kind = desc->dtad_kind; 11549 11550 if ((action->dta_difo = dp) != NULL) 11551 dtrace_difo_hold(dp); 11552 11553 rec->dtrd_action = action->dta_kind; 11554 rec->dtrd_arg = arg; 11555 rec->dtrd_uarg = desc->dtad_uarg; 11556 rec->dtrd_alignment = (uint16_t)align; 11557 rec->dtrd_format = format; 11558 11559 if ((last = ecb->dte_action_last) != NULL) { 11560 ASSERT(ecb->dte_action != NULL); 11561 action->dta_prev = last; 11562 last->dta_next = action; 11563 } else { 11564 ASSERT(ecb->dte_action == NULL); 11565 ecb->dte_action = action; 11566 } 11567 11568 ecb->dte_action_last = action; 11569 11570 return (0); 11571 } 11572 11573 static void 11574 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11575 { 11576 dtrace_action_t *act = ecb->dte_action, *next; 11577 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11578 dtrace_difo_t *dp; 11579 uint16_t format; 11580 11581 if (act != NULL && act->dta_refcnt > 1) { 11582 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11583 act->dta_refcnt--; 11584 } else { 11585 for (; act != NULL; act = next) { 11586 next = act->dta_next; 11587 ASSERT(next != NULL || act == ecb->dte_action_last); 11588 ASSERT(act->dta_refcnt == 1); 11589 11590 if ((format = act->dta_rec.dtrd_format) != 0) 11591 dtrace_format_remove(ecb->dte_state, format); 11592 11593 if ((dp = act->dta_difo) != NULL) 11594 dtrace_difo_release(dp, vstate); 11595 11596 if (DTRACEACT_ISAGG(act->dta_kind)) { 11597 dtrace_ecb_aggregation_destroy(ecb, act); 11598 } else { 11599 kmem_free(act, sizeof (dtrace_action_t)); 11600 } 11601 } 11602 } 11603 11604 ecb->dte_action = NULL; 11605 ecb->dte_action_last = NULL; 11606 ecb->dte_size = 0; 11607 } 11608 11609 static void 11610 dtrace_ecb_disable(dtrace_ecb_t *ecb) 11611 { 11612 /* 11613 * We disable the ECB by removing it from its probe. 11614 */ 11615 dtrace_ecb_t *pecb, *prev = NULL; 11616 dtrace_probe_t *probe = ecb->dte_probe; 11617 11618 ASSERT(MUTEX_HELD(&dtrace_lock)); 11619 11620 if (probe == NULL) { 11621 /* 11622 * This is the NULL probe; there is nothing to disable. 11623 */ 11624 return; 11625 } 11626 11627 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11628 if (pecb == ecb) 11629 break; 11630 prev = pecb; 11631 } 11632 11633 ASSERT(pecb != NULL); 11634 11635 if (prev == NULL) { 11636 probe->dtpr_ecb = ecb->dte_next; 11637 } else { 11638 prev->dte_next = ecb->dte_next; 11639 } 11640 11641 if (ecb == probe->dtpr_ecb_last) { 11642 ASSERT(ecb->dte_next == NULL); 11643 probe->dtpr_ecb_last = prev; 11644 } 11645 11646 /* 11647 * The ECB has been disconnected from the probe; now sync to assure 11648 * that all CPUs have seen the change before returning. 11649 */ 11650 dtrace_sync(); 11651 11652 if (probe->dtpr_ecb == NULL) { 11653 /* 11654 * That was the last ECB on the probe; clear the predicate 11655 * cache ID for the probe, disable it and sync one more time 11656 * to assure that we'll never hit it again. 
11657 */ 11658 dtrace_provider_t *prov = probe->dtpr_provider; 11659 11660 ASSERT(ecb->dte_next == NULL); 11661 ASSERT(probe->dtpr_ecb_last == NULL); 11662 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11663 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11664 probe->dtpr_id, probe->dtpr_arg); 11665 dtrace_sync(); 11666 } else { 11667 /* 11668 * There is at least one ECB remaining on the probe. If there 11669 * is _exactly_ one, set the probe's predicate cache ID to be 11670 * the predicate cache ID of the remaining ECB. 11671 */ 11672 ASSERT(probe->dtpr_ecb_last != NULL); 11673 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11674 11675 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11676 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11677 11678 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11679 11680 if (p != NULL) 11681 probe->dtpr_predcache = p->dtp_cacheid; 11682 } 11683 11684 ecb->dte_next = NULL; 11685 } 11686 } 11687 11688 static void 11689 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11690 { 11691 dtrace_state_t *state = ecb->dte_state; 11692 dtrace_vstate_t *vstate = &state->dts_vstate; 11693 dtrace_predicate_t *pred; 11694 dtrace_epid_t epid = ecb->dte_epid; 11695 11696 ASSERT(MUTEX_HELD(&dtrace_lock)); 11697 ASSERT(ecb->dte_next == NULL); 11698 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11699 11700 if ((pred = ecb->dte_predicate) != NULL) 11701 dtrace_predicate_release(pred, vstate); 11702 11703 dtrace_ecb_action_remove(ecb); 11704 11705 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11706 state->dts_ecbs[epid - 1] = NULL; 11707 11708 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11709 } 11710 11711 static dtrace_ecb_t * 11712 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11713 dtrace_enabling_t *enab) 11714 { 11715 dtrace_ecb_t *ecb; 11716 dtrace_predicate_t *pred; 11717 dtrace_actdesc_t *act; 11718 dtrace_provider_t *prov; 11719 dtrace_ecbdesc_t *desc = enab->dten_current; 11720 11721 ASSERT(MUTEX_HELD(&dtrace_lock)); 11722 ASSERT(state != NULL); 11723 11724 ecb = dtrace_ecb_add(state, probe); 11725 ecb->dte_uarg = desc->dted_uarg; 11726 11727 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11728 dtrace_predicate_hold(pred); 11729 ecb->dte_predicate = pred; 11730 } 11731 11732 if (probe != NULL) { 11733 /* 11734 * If the provider shows more leg than the consumer is old 11735 * enough to see, we need to enable the appropriate implicit 11736 * predicate bits to prevent the ecb from activating at 11737 * revealing times. 11738 * 11739 * Providers specifying DTRACE_PRIV_USER at register time 11740 * are stating that they need the /proc-style privilege 11741 * model to be enforced, and this is what DTRACE_COND_OWNER 11742 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11743 */ 11744 prov = probe->dtpr_provider; 11745 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11746 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11747 ecb->dte_cond |= DTRACE_COND_OWNER; 11748 11749 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11750 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11751 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11752 11753 /* 11754 * If the provider shows us kernel innards and the user 11755 * is lacking sufficient privilege, enable the 11756 * DTRACE_COND_USERMODE implicit predicate. 
11757 */ 11758 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11759 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11760 ecb->dte_cond |= DTRACE_COND_USERMODE; 11761 } 11762 11763 if (dtrace_ecb_create_cache != NULL) { 11764 /* 11765 * If we have a cached ecb, we'll use its action list instead 11766 * of creating our own (saving both time and space). 11767 */ 11768 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11769 dtrace_action_t *act = cached->dte_action; 11770 11771 if (act != NULL) { 11772 ASSERT(act->dta_refcnt > 0); 11773 act->dta_refcnt++; 11774 ecb->dte_action = act; 11775 ecb->dte_action_last = cached->dte_action_last; 11776 ecb->dte_needed = cached->dte_needed; 11777 ecb->dte_size = cached->dte_size; 11778 ecb->dte_alignment = cached->dte_alignment; 11779 } 11780 11781 return (ecb); 11782 } 11783 11784 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11785 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11786 dtrace_ecb_destroy(ecb); 11787 return (NULL); 11788 } 11789 } 11790 11791 dtrace_ecb_resize(ecb); 11792 11793 return (dtrace_ecb_create_cache = ecb); 11794 } 11795 11796 static int 11797 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11798 { 11799 dtrace_ecb_t *ecb; 11800 dtrace_enabling_t *enab = arg; 11801 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11802 11803 ASSERT(state != NULL); 11804 11805 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11806 /* 11807 * This probe was created in a generation for which this 11808 * enabling has previously created ECBs; we don't want to 11809 * enable it again, so just kick out. 11810 */ 11811 return (DTRACE_MATCH_NEXT); 11812 } 11813 11814 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11815 return (DTRACE_MATCH_DONE); 11816 11817 dtrace_ecb_enable(ecb); 11818 return (DTRACE_MATCH_NEXT); 11819 } 11820 11821 static dtrace_ecb_t * 11822 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11823 { 11824 dtrace_ecb_t *ecb; 11825 11826 ASSERT(MUTEX_HELD(&dtrace_lock)); 11827 11828 if (id == 0 || id > state->dts_necbs) 11829 return (NULL); 11830 11831 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11832 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11833 11834 return (state->dts_ecbs[id - 1]); 11835 } 11836 11837 static dtrace_aggregation_t * 11838 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 11839 { 11840 dtrace_aggregation_t *agg; 11841 11842 ASSERT(MUTEX_HELD(&dtrace_lock)); 11843 11844 if (id == 0 || id > state->dts_naggregations) 11845 return (NULL); 11846 11847 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 11848 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 11849 agg->dtag_id == id); 11850 11851 return (state->dts_aggregations[id - 1]); 11852 } 11853 11854 /* 11855 * DTrace Buffer Functions 11856 * 11857 * The following functions manipulate DTrace buffers. Most of these functions 11858 * are called in the context of establishing or processing consumer state; 11859 * exceptions are explicitly noted. 11860 */ 11861 11862 /* 11863 * Note: called from cross call context. This function switches the two 11864 * buffers on a given CPU. The atomicity of this operation is assured by 11865 * disabling interrupts while the actual switch takes place; the disabling of 11866 * interrupts serializes the execution with any execution of dtrace_probe() on 11867 * the same CPU. 
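 * (The dtb_tomax/dtb_xamot pair implements per-CPU double buffering:
 * probes record into the active dtb_tomax buffer while consumers copy
 * out the inactive dtb_xamot buffer -- "xamot" being, it would appear,
 * "tomax" written backwards. The swap below merely exchanges the two
 * roles and snapshots the bookkeeping counters.)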
11868 */ 11869 static void 11870 dtrace_buffer_switch(dtrace_buffer_t *buf) 11871 { 11872 caddr_t tomax = buf->dtb_tomax; 11873 caddr_t xamot = buf->dtb_xamot; 11874 dtrace_icookie_t cookie; 11875 hrtime_t now; 11876 11877 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11878 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 11879 11880 cookie = dtrace_interrupt_disable(); 11881 now = dtrace_gethrtime(); 11882 buf->dtb_tomax = xamot; 11883 buf->dtb_xamot = tomax; 11884 buf->dtb_xamot_drops = buf->dtb_drops; 11885 buf->dtb_xamot_offset = buf->dtb_offset; 11886 buf->dtb_xamot_errors = buf->dtb_errors; 11887 buf->dtb_xamot_flags = buf->dtb_flags; 11888 buf->dtb_offset = 0; 11889 buf->dtb_drops = 0; 11890 buf->dtb_errors = 0; 11891 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 11892 buf->dtb_interval = now - buf->dtb_switched; 11893 buf->dtb_switched = now; 11894 dtrace_interrupt_enable(cookie); 11895 } 11896 11897 /* 11898 * Note: called from cross call context. This function activates a buffer 11899 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 11900 * is guaranteed by the disabling of interrupts. 11901 */ 11902 static void 11903 dtrace_buffer_activate(dtrace_state_t *state) 11904 { 11905 dtrace_buffer_t *buf; 11906 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 11907 11908 buf = &state->dts_buffer[curcpu]; 11909 11910 if (buf->dtb_tomax != NULL) { 11911 /* 11912 * We might like to assert that the buffer is marked inactive, 11913 * but this isn't necessarily true: the buffer for the CPU 11914 * that processes the BEGIN probe has its buffer activated 11915 * manually. In this case, we take the (harmless) action of 11916 * re-clearing the INACTIVE bit. 11917 */ 11918 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 11919 } 11920 11921 dtrace_interrupt_enable(cookie); 11922 } 11923 11924 #ifdef __FreeBSD__ 11925 /* 11926 * Activate the specified per-CPU buffer. This is used instead of 11927 * dtrace_buffer_activate() when APs have not yet started, i.e. when 11928 * activating anonymous state. 11929 */ 11930 static void 11931 dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu) 11932 { 11933 11934 if (state->dts_buffer[cpu].dtb_tomax != NULL) 11935 state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11936 } 11937 #endif 11938 11939 static int 11940 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 11941 processorid_t cpu, int *factor) 11942 { 11943 #ifdef illumos 11944 cpu_t *cp; 11945 #endif 11946 dtrace_buffer_t *buf; 11947 int allocated = 0, desired = 0; 11948 11949 #ifdef illumos 11950 ASSERT(MUTEX_HELD(&cpu_lock)); 11951 ASSERT(MUTEX_HELD(&dtrace_lock)); 11952 11953 *factor = 1; 11954 11955 if (size > dtrace_nonroot_maxsize && 11956 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 11957 return (EFBIG); 11958 11959 cp = cpu_list; 11960 11961 do { 11962 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 11963 continue; 11964 11965 buf = &bufs[cp->cpu_id]; 11966 11967 /* 11968 * If there is already a buffer allocated for this CPU, it 11969 * is only possible that this is a DR event. 
In this case, the buffer size must match our specified size. 11970 */ 11971 if (buf->dtb_tomax != NULL) { 11972 ASSERT(buf->dtb_size == size); 11973 continue; 11974 } 11975 11976 ASSERT(buf->dtb_xamot == NULL); 11977 11978 if ((buf->dtb_tomax = kmem_zalloc(size, 11979 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11980 goto err; 11981 11982 buf->dtb_size = size; 11983 buf->dtb_flags = flags; 11984 buf->dtb_offset = 0; 11985 buf->dtb_drops = 0; 11986 11987 if (flags & DTRACEBUF_NOSWITCH) 11988 continue; 11989 11990 if ((buf->dtb_xamot = kmem_zalloc(size, 11991 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11992 goto err; 11993 } while ((cp = cp->cpu_next) != cpu_list); 11994 11995 return (0); 11996 11997 err: 11998 cp = cpu_list; 11999 12000 do { 12001 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 12002 continue; 12003 12004 buf = &bufs[cp->cpu_id]; 12005 desired += 2; 12006 12007 if (buf->dtb_xamot != NULL) { 12008 ASSERT(buf->dtb_tomax != NULL); 12009 ASSERT(buf->dtb_size == size); 12010 kmem_free(buf->dtb_xamot, size); 12011 allocated++; 12012 } 12013 12014 if (buf->dtb_tomax != NULL) { 12015 ASSERT(buf->dtb_size == size); 12016 kmem_free(buf->dtb_tomax, size); 12017 allocated++; 12018 } 12019 12020 buf->dtb_tomax = NULL; 12021 buf->dtb_xamot = NULL; 12022 buf->dtb_size = 0; 12023 } while ((cp = cp->cpu_next) != cpu_list); 12024 #else 12025 int i; 12026 12027 *factor = 1; 12028 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ 12029 defined(__mips__) || defined(__powerpc__) || defined(__riscv__) 12030 /* 12031 * FreeBSD isn't good at limiting the amount of memory we 12032 * ask to malloc, so let's place a limit here before trying 12033 * to do something that might well end in tears at bedtime. 12034 */ 12035 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 12036 return (ENOMEM); 12037 #endif 12038 12039 ASSERT(MUTEX_HELD(&dtrace_lock)); 12040 CPU_FOREACH(i) { 12041 if (cpu != DTRACE_CPUALL && cpu != i) 12042 continue; 12043 12044 buf = &bufs[i]; 12045 12046 /* 12047 * If there is already a buffer allocated for this CPU, it 12048 * is only possible that this is a DR event. In this case, 12049 * the buffer size must match our specified size. 12050 */ 12051 if (buf->dtb_tomax != NULL) { 12052 ASSERT(buf->dtb_size == size); 12053 continue; 12054 } 12055 12056 ASSERT(buf->dtb_xamot == NULL); 12057 12058 if ((buf->dtb_tomax = kmem_zalloc(size, 12059 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12060 goto err; 12061 12062 buf->dtb_size = size; 12063 buf->dtb_flags = flags; 12064 buf->dtb_offset = 0; 12065 buf->dtb_drops = 0; 12066 12067 if (flags & DTRACEBUF_NOSWITCH) 12068 continue; 12069 12070 if ((buf->dtb_xamot = kmem_zalloc(size, 12071 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 12072 goto err; 12073 } 12074 12075 return (0); 12076 12077 err: 12078 /* 12079 * Error allocating memory, so free the buffers that were 12080 * allocated before the failed allocation. 12081 */ 12082 CPU_FOREACH(i) { 12083 if (cpu != DTRACE_CPUALL && cpu != i) 12084 continue; 12085 12086 buf = &bufs[i]; 12087 desired += 2; 12088 12089 if (buf->dtb_xamot != NULL) { 12090 ASSERT(buf->dtb_tomax != NULL); 12091 ASSERT(buf->dtb_size == size); 12092 kmem_free(buf->dtb_xamot, size); 12093 allocated++; 12094 } 12095 12096 if (buf->dtb_tomax != NULL) { 12097 ASSERT(buf->dtb_size == size); 12098 kmem_free(buf->dtb_tomax, size); 12099 allocated++; 12100 } 12101 12102 buf->dtb_tomax = NULL; 12103 buf->dtb_xamot = NULL; 12104 buf->dtb_size = 0; 12105 12106 } 12107 #endif 12108 *factor = desired / (allocated > 0 ? 
allocated : 1); 12109 12110 return (ENOMEM); 12111 } 12112 12113 /* 12114 * Note: called from probe context. This function just increments the drop 12115 * count on a buffer. It has been made a function to allow for the 12116 * possibility of understanding the source of mysterious drop counts. (A 12117 * problem for which one may be particularly disappointed that DTrace cannot 12118 * be used to understand DTrace.) 12119 */ 12120 static void 12121 dtrace_buffer_drop(dtrace_buffer_t *buf) 12122 { 12123 buf->dtb_drops++; 12124 } 12125 12126 /* 12127 * Note: called from probe context. This function is called to reserve space 12128 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 12129 * mstate. Returns the new offset in the buffer, or a negative value if an 12130 * error has occurred. 12131 */ 12132 static intptr_t 12133 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 12134 dtrace_state_t *state, dtrace_mstate_t *mstate) 12135 { 12136 intptr_t offs = buf->dtb_offset, soffs; 12137 intptr_t woffs; 12138 caddr_t tomax; 12139 size_t total; 12140 12141 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 12142 return (-1); 12143 12144 if ((tomax = buf->dtb_tomax) == NULL) { 12145 dtrace_buffer_drop(buf); 12146 return (-1); 12147 } 12148 12149 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 12150 while (offs & (align - 1)) { 12151 /* 12152 * Assert that our alignment is off by a number which 12153 * is itself sizeof (uint32_t) aligned. 12154 */ 12155 ASSERT(!((align - (offs & (align - 1))) & 12156 (sizeof (uint32_t) - 1))); 12157 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12158 offs += sizeof (uint32_t); 12159 } 12160 12161 if ((soffs = offs + needed) > buf->dtb_size) { 12162 dtrace_buffer_drop(buf); 12163 return (-1); 12164 } 12165 12166 if (mstate == NULL) 12167 return (offs); 12168 12169 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12170 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12171 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12172 12173 return (offs); 12174 } 12175 12176 if (buf->dtb_flags & DTRACEBUF_FILL) { 12177 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12178 (buf->dtb_flags & DTRACEBUF_FULL)) 12179 return (-1); 12180 goto out; 12181 } 12182 12183 total = needed + (offs & (align - 1)); 12184 12185 /* 12186 * For a ring buffer, life is quite a bit more complicated. Before 12187 * we can store any padding, we need to adjust our wrapping offset. 12188 * (If we've never before wrapped or we're not about to, no adjustment 12189 * is required.) 12190 */ 12191 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12192 offs + total > buf->dtb_size) { 12193 woffs = buf->dtb_xamot_offset; 12194 12195 if (offs + total > buf->dtb_size) { 12196 /* 12197 * We can't fit in the end of the buffer. First, a 12198 * sanity check that we can fit in the buffer at all. 12199 */ 12200 if (total > buf->dtb_size) { 12201 dtrace_buffer_drop(buf); 12202 return (-1); 12203 } 12204 12205 /* 12206 * We're going to be storing at the top of the buffer, 12207 * so now we need to deal with the wrapped offset. We 12208 * only reset our wrapped offset to 0 if it is 12209 * currently greater than the current offset. If it 12210 * is less than the current offset, it is because a 12211 * previous allocation induced a wrap -- but the 12212 * allocation didn't subsequently take the space due 12213 * to an error or false predicate evaluation. 
In this 12214 * case, we'll just leave the wrapped offset alone: if 12215 * the wrapped offset hasn't been advanced far enough 12216 * for this allocation, it will be adjusted in the 12217 * lower loop. 12218 */ 12219 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12220 if (woffs >= offs) 12221 woffs = 0; 12222 } else { 12223 woffs = 0; 12224 } 12225 12226 /* 12227 * Now we know that we're going to be storing to the 12228 * top of the buffer and that there is room for us 12229 * there. We need to clear the buffer from the current 12230 * offset to the end (there may be old gunk there). 12231 */ 12232 while (offs < buf->dtb_size) 12233 tomax[offs++] = 0; 12234 12235 /* 12236 * We need to set our offset to zero. And because we 12237 * are wrapping, we need to set the bit indicating as 12238 * much. We can also adjust our needed space back 12239 * down to the space required by the ECB -- we know 12240 * that the top of the buffer is aligned. 12241 */ 12242 offs = 0; 12243 total = needed; 12244 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12245 } else { 12246 /* 12247 * There is room for us in the buffer, so we simply 12248 * need to check the wrapped offset. 12249 */ 12250 if (woffs < offs) { 12251 /* 12252 * The wrapped offset is less than the offset. 12253 * This can happen if we allocated buffer space 12254 * that induced a wrap, but then we didn't 12255 * subsequently take the space due to an error 12256 * or false predicate evaluation. This is 12257 * okay; we know that _this_ allocation isn't 12258 * going to induce a wrap. We still can't 12259 * reset the wrapped offset to be zero, 12260 * however: the space may have been trashed in 12261 * the previous failed probe attempt. But at 12262 * least the wrapped offset doesn't need to 12263 * be adjusted at all... 12264 */ 12265 goto out; 12266 } 12267 } 12268 12269 while (offs + total > woffs) { 12270 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12271 size_t size; 12272 12273 if (epid == DTRACE_EPIDNONE) { 12274 size = sizeof (uint32_t); 12275 } else { 12276 ASSERT3U(epid, <=, state->dts_necbs); 12277 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12278 12279 size = state->dts_ecbs[epid - 1]->dte_size; 12280 } 12281 12282 ASSERT(woffs + size <= buf->dtb_size); 12283 ASSERT(size != 0); 12284 12285 if (woffs + size == buf->dtb_size) { 12286 /* 12287 * We've reached the end of the buffer; we want 12288 * to set the wrapped offset to 0 and break 12289 * out. However, if the offs is 0, then we're 12290 * in a strange edge-condition: the amount of 12291 * space that we want to reserve plus the size 12292 * of the record that we're overwriting is 12293 * greater than the size of the buffer. This 12294 * is problematic because if we reserve the 12295 * space but subsequently don't consume it (due 12296 * to a failed predicate or error) the wrapped 12297 * offset will be 0 -- yet the EPID at offset 0 12298 * will not be committed. This situation is 12299 * relatively easy to deal with: if we're in 12300 * this case, the buffer is indistinguishable 12301 * from one that hasn't wrapped; we need only 12302 * finish the job by clearing the wrapped bit, 12303 * explicitly setting the offset to be 0, and 12304 * zero'ing out the old data in the buffer. 
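 * (For example, this arises when the reservation is large enough that
 * the scan below must overwrite every record remaining in the buffer,
 * running woffs exactly to dtb_size while offs is still 0 from the
 * wrap above.)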
12305 */ 12306 if (offs == 0) { 12307 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12308 buf->dtb_offset = 0; 12309 woffs = total; 12310 12311 while (woffs < buf->dtb_size) 12312 tomax[woffs++] = 0; 12313 } 12314 12315 woffs = 0; 12316 break; 12317 } 12318 12319 woffs += size; 12320 } 12321 12322 /* 12323 * We have a wrapped offset. It may be that the wrapped offset 12324 * has become zero -- that's okay. 12325 */ 12326 buf->dtb_xamot_offset = woffs; 12327 } 12328 12329 out: 12330 /* 12331 * Now we can plow the buffer with any necessary padding. 12332 */ 12333 while (offs & (align - 1)) { 12334 /* 12335 * Assert that our alignment is off by a number which 12336 * is itself sizeof (uint32_t) aligned. 12337 */ 12338 ASSERT(!((align - (offs & (align - 1))) & 12339 (sizeof (uint32_t) - 1))); 12340 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12341 offs += sizeof (uint32_t); 12342 } 12343 12344 if (buf->dtb_flags & DTRACEBUF_FILL) { 12345 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12346 buf->dtb_flags |= DTRACEBUF_FULL; 12347 return (-1); 12348 } 12349 } 12350 12351 if (mstate == NULL) 12352 return (offs); 12353 12354 /* 12355 * For ring buffers and fill buffers, the scratch space is always 12356 * the inactive buffer. 12357 */ 12358 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12359 mstate->dtms_scratch_size = buf->dtb_size; 12360 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12361 12362 return (offs); 12363 } 12364 12365 static void 12366 dtrace_buffer_polish(dtrace_buffer_t *buf) 12367 { 12368 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12369 ASSERT(MUTEX_HELD(&dtrace_lock)); 12370 12371 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12372 return; 12373 12374 /* 12375 * We need to polish the ring buffer. There are three cases: 12376 * 12377 * - The first (and presumably most common) is that there is no gap 12378 * between the buffer offset and the wrapped offset. In this case, 12379 * there is nothing in the buffer that isn't valid data; we can 12380 * mark the buffer as polished and return. 12381 * 12382 * - The second (less common than the first but still more common 12383 * than the third) is that there is a gap between the buffer offset 12384 * and the wrapped offset, and the wrapped offset is larger than the 12385 * buffer offset. This can happen because of an alignment issue, or 12386 * can happen because of a call to dtrace_buffer_reserve() that 12387 * didn't subsequently consume the buffer space. In this case, 12388 * we need to zero the data from the buffer offset to the wrapped 12389 * offset. 12390 * 12391 * - The third (and least common) is that there is a gap between the 12392 * buffer offset and the wrapped offset, but the wrapped offset is 12393 * _less_ than the buffer offset. This can only happen because a 12394 * call to dtrace_buffer_reserve() induced a wrap, but the space 12395 * was not subsequently consumed. In this case, we need to zero the 12396 * space from the offset to the end of the buffer _and_ from the 12397 * top of the buffer to the wrapped offset. 
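 *
 * Schematically (an illustrative sketch with hypothetical
 * contents), with "o" denoting dtb_offset and "w" denoting the
 * wrapped offset dtb_xamot_offset:
 *
 *	o == w:  [ valid ...................... ]  nothing to zero
 *	o <  w:  [ valid | gap ... | valid .... ]  zero [o, w)
 *	o >  w:  [ gap .. | valid ..... | gap . ]  zero [o, dtb_size)
 *						   and [0, w)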
12398 */ 12399 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12400 bzero(buf->dtb_tomax + buf->dtb_offset, 12401 buf->dtb_xamot_offset - buf->dtb_offset); 12402 } 12403 12404 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12405 bzero(buf->dtb_tomax + buf->dtb_offset, 12406 buf->dtb_size - buf->dtb_offset); 12407 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12408 } 12409 } 12410 12411 /* 12412 * This routine determines if data generated at the specified time has likely 12413 * been entirely consumed at user-level. This routine is called to determine 12414 * if an ECB on a defunct probe (but for an active enabling) can be safely 12415 * disabled and destroyed. 12416 */ 12417 static int 12418 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12419 { 12420 int i; 12421 12422 for (i = 0; i < NCPU; i++) { 12423 dtrace_buffer_t *buf = &bufs[i]; 12424 12425 if (buf->dtb_size == 0) 12426 continue; 12427 12428 if (buf->dtb_flags & DTRACEBUF_RING) 12429 return (0); 12430 12431 if (!buf->dtb_switched && buf->dtb_offset != 0) 12432 return (0); 12433 12434 if (buf->dtb_switched - buf->dtb_interval < when) 12435 return (0); 12436 } 12437 12438 return (1); 12439 } 12440 12441 static void 12442 dtrace_buffer_free(dtrace_buffer_t *bufs) 12443 { 12444 int i; 12445 12446 for (i = 0; i < NCPU; i++) { 12447 dtrace_buffer_t *buf = &bufs[i]; 12448 12449 if (buf->dtb_tomax == NULL) { 12450 ASSERT(buf->dtb_xamot == NULL); 12451 ASSERT(buf->dtb_size == 0); 12452 continue; 12453 } 12454 12455 if (buf->dtb_xamot != NULL) { 12456 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12457 kmem_free(buf->dtb_xamot, buf->dtb_size); 12458 } 12459 12460 kmem_free(buf->dtb_tomax, buf->dtb_size); 12461 buf->dtb_size = 0; 12462 buf->dtb_tomax = NULL; 12463 buf->dtb_xamot = NULL; 12464 } 12465 } 12466 12467 /* 12468 * DTrace Enabling Functions 12469 */ 12470 static dtrace_enabling_t * 12471 dtrace_enabling_create(dtrace_vstate_t *vstate) 12472 { 12473 dtrace_enabling_t *enab; 12474 12475 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12476 enab->dten_vstate = vstate; 12477 12478 return (enab); 12479 } 12480 12481 static void 12482 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12483 { 12484 dtrace_ecbdesc_t **ndesc; 12485 size_t osize, nsize; 12486 12487 /* 12488 * We can't add to enablings after we've enabled them, or after we've 12489 * retained them. 
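 *
 * (Note that the descriptor array below grows geometrically:
 * dten_maxdesc doubles on each reallocation -- 0, 1, 2, 4, 8,
 * ... -- so adding n descriptions performs O(n) total copy work,
 * i.e. amortized constant work per call.)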
12490 */
12491 ASSERT(enab->dten_probegen == 0);
12492 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12493
12494 if (enab->dten_ndesc < enab->dten_maxdesc) {
12495 enab->dten_desc[enab->dten_ndesc++] = ecb;
12496 return;
12497 }
12498
12499 osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
12500
12501 if (enab->dten_maxdesc == 0) {
12502 enab->dten_maxdesc = 1;
12503 } else {
12504 enab->dten_maxdesc <<= 1;
12505 }
12506
12507 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12508
12509 nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
12510 ndesc = kmem_zalloc(nsize, KM_SLEEP);
12511 bcopy(enab->dten_desc, ndesc, osize);
12512 if (enab->dten_desc != NULL)
12513 kmem_free(enab->dten_desc, osize);
12514
12515 enab->dten_desc = ndesc;
12516 enab->dten_desc[enab->dten_ndesc++] = ecb;
12517 }
12518
12519 static void
12520 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12521 dtrace_probedesc_t *pd)
12522 {
12523 dtrace_ecbdesc_t *new;
12524 dtrace_predicate_t *pred;
12525 dtrace_actdesc_t *act;
12526
12527 /*
12528 * We're going to create a new ECB description that matches the
12529 * specified ECB in every way, but has the specified probe description.
12530 */
12531 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12532
12533 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12534 dtrace_predicate_hold(pred);
12535
12536 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12537 dtrace_actdesc_hold(act);
12538
12539 new->dted_action = ecb->dted_action;
12540 new->dted_pred = ecb->dted_pred;
12541 new->dted_probe = *pd;
12542 new->dted_uarg = ecb->dted_uarg;
12543
12544 dtrace_enabling_add(enab, new);
12545 }
12546
12547 static void
12548 dtrace_enabling_dump(dtrace_enabling_t *enab)
12549 {
12550 int i;
12551
12552 for (i = 0; i < enab->dten_ndesc; i++) {
12553 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12554
12555 #ifdef __FreeBSD__
12556 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i,
12557 desc->dtpd_provider, desc->dtpd_mod,
12558 desc->dtpd_func, desc->dtpd_name);
12559 #else
12560 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12561 desc->dtpd_provider, desc->dtpd_mod,
12562 desc->dtpd_func, desc->dtpd_name);
12563 #endif
12564 }
12565 }
12566
12567 static void
12568 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12569 {
12570 int i;
12571 dtrace_ecbdesc_t *ep;
12572 dtrace_vstate_t *vstate = enab->dten_vstate;
12573
12574 ASSERT(MUTEX_HELD(&dtrace_lock));
12575
12576 for (i = 0; i < enab->dten_ndesc; i++) {
12577 dtrace_actdesc_t *act, *next;
12578 dtrace_predicate_t *pred;
12579
12580 ep = enab->dten_desc[i];
12581
12582 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12583 dtrace_predicate_release(pred, vstate);
12584
12585 for (act = ep->dted_action; act != NULL; act = next) {
12586 next = act->dtad_next;
12587 dtrace_actdesc_release(act, vstate);
12588 }
12589
12590 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12591 }
12592
12593 if (enab->dten_desc != NULL)
12594 kmem_free(enab->dten_desc,
12595 enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *));
12596
12597 /*
12598 * If this was a retained enabling, decrement the dts_nretained count
12599 * and take it off of the dtrace_retained list.
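 *
 * Bumping dtrace_retained_gen below is what allows long-running
 * walkers of the retained list -- e.g. dtrace_enabling_provide(),
 * which must drop dtrace_lock around each dtps_provide() call --
 * to detect that the list has changed underneath them and restart
 * their scan.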
12600 */ 12601 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12602 dtrace_retained == enab) { 12603 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12604 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12605 enab->dten_vstate->dtvs_state->dts_nretained--; 12606 dtrace_retained_gen++; 12607 } 12608 12609 if (enab->dten_prev == NULL) { 12610 if (dtrace_retained == enab) { 12611 dtrace_retained = enab->dten_next; 12612 12613 if (dtrace_retained != NULL) 12614 dtrace_retained->dten_prev = NULL; 12615 } 12616 } else { 12617 ASSERT(enab != dtrace_retained); 12618 ASSERT(dtrace_retained != NULL); 12619 enab->dten_prev->dten_next = enab->dten_next; 12620 } 12621 12622 if (enab->dten_next != NULL) { 12623 ASSERT(dtrace_retained != NULL); 12624 enab->dten_next->dten_prev = enab->dten_prev; 12625 } 12626 12627 kmem_free(enab, sizeof (dtrace_enabling_t)); 12628 } 12629 12630 static int 12631 dtrace_enabling_retain(dtrace_enabling_t *enab) 12632 { 12633 dtrace_state_t *state; 12634 12635 ASSERT(MUTEX_HELD(&dtrace_lock)); 12636 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12637 ASSERT(enab->dten_vstate != NULL); 12638 12639 state = enab->dten_vstate->dtvs_state; 12640 ASSERT(state != NULL); 12641 12642 /* 12643 * We only allow each state to retain dtrace_retain_max enablings. 12644 */ 12645 if (state->dts_nretained >= dtrace_retain_max) 12646 return (ENOSPC); 12647 12648 state->dts_nretained++; 12649 dtrace_retained_gen++; 12650 12651 if (dtrace_retained == NULL) { 12652 dtrace_retained = enab; 12653 return (0); 12654 } 12655 12656 enab->dten_next = dtrace_retained; 12657 dtrace_retained->dten_prev = enab; 12658 dtrace_retained = enab; 12659 12660 return (0); 12661 } 12662 12663 static int 12664 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12665 dtrace_probedesc_t *create) 12666 { 12667 dtrace_enabling_t *new, *enab; 12668 int found = 0, err = ENOENT; 12669 12670 ASSERT(MUTEX_HELD(&dtrace_lock)); 12671 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12672 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12673 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12674 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12675 12676 new = dtrace_enabling_create(&state->dts_vstate); 12677 12678 /* 12679 * Iterate over all retained enablings, looking for enablings that 12680 * match the specified state. 12681 */ 12682 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12683 int i; 12684 12685 /* 12686 * dtvs_state can only be NULL for helper enablings -- and 12687 * helper enablings can't be retained. 12688 */ 12689 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12690 12691 if (enab->dten_vstate->dtvs_state != state) 12692 continue; 12693 12694 /* 12695 * Now iterate over each probe description; we're looking for 12696 * an exact match to the specified probe description. 12697 */ 12698 for (i = 0; i < enab->dten_ndesc; i++) { 12699 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12700 dtrace_probedesc_t *pd = &ep->dted_probe; 12701 12702 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12703 continue; 12704 12705 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12706 continue; 12707 12708 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12709 continue; 12710 12711 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12712 continue; 12713 12714 /* 12715 * We have a winning probe! Add it to our growing 12716 * enabling. 
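 *
 * As a purely hypothetical illustration of the match/create pair:
 * given a retained enabling on fbt:kernel:foo:entry, a replication
 * with
 *
 *	match  = fbt:kernel:foo:entry
 *	create = fbt:kernel:foo:return
 *
 * would duplicate the matching ECB description -- same predicate,
 * same actions, same user argument -- under the "return" probe
 * description. (These probe names are illustrative only.)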
12717 */
12718 found = 1;
12719 dtrace_enabling_addlike(new, ep, create);
12720 }
12721 }
12722
12723 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12724 dtrace_enabling_destroy(new);
12725 return (err);
12726 }
12727
12728 return (0);
12729 }
12730
12731 static void
12732 dtrace_enabling_retract(dtrace_state_t *state)
12733 {
12734 dtrace_enabling_t *enab, *next;
12735
12736 ASSERT(MUTEX_HELD(&dtrace_lock));
12737
12738 /*
12739 * Iterate over all retained enablings, destroying the enablings
12740 * retained for the specified state.
12741 */
12742 for (enab = dtrace_retained; enab != NULL; enab = next) {
12743 next = enab->dten_next;
12744
12745 /*
12746 * dtvs_state can only be NULL for helper enablings -- and
12747 * helper enablings can't be retained.
12748 */
12749 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12750
12751 if (enab->dten_vstate->dtvs_state == state) {
12752 ASSERT(state->dts_nretained > 0);
12753 dtrace_enabling_destroy(enab);
12754 }
12755 }
12756
12757 ASSERT(state->dts_nretained == 0);
12758 }
12759
12760 static int
12761 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12762 {
12763 int i = 0;
12764 int matched = 0;
12765
12766 ASSERT(MUTEX_HELD(&cpu_lock));
12767 ASSERT(MUTEX_HELD(&dtrace_lock));
12768
12769 for (i = 0; i < enab->dten_ndesc; i++) {
12770 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12771
12772 enab->dten_current = ep;
12773 enab->dten_error = 0;
12774
12775 matched += dtrace_probe_enable(&ep->dted_probe, enab);
12776
12777 if (enab->dten_error != 0) {
12778 /*
12779 * If we get an error half-way through enabling the
12780 * probes, we kick out -- perhaps with some number of
12781 * them enabled. Leaving enabled probes enabled may
12782 * be slightly confusing for user-level, but we expect
12783 * that no one will attempt to actually drive on in
12784 * the face of such errors. If this is an anonymous
12785 * enabling (indicated with a NULL nmatched pointer),
12786 * we cmn_err() a message. We aren't expecting to
12787 * get such an error -- insofar as it can exist at all,
12788 * it would be a result of corrupted DOF in the driver
12789 * properties.
12790 */
12791 if (nmatched == NULL) {
12792 cmn_err(CE_WARN, "dtrace_enabling_match() "
12793 "error on %p: %d", (void *)ep,
12794 enab->dten_error);
12795 }
12796
12797 return (enab->dten_error);
12798 }
12799 }
12800
12801 enab->dten_probegen = dtrace_probegen;
12802 if (nmatched != NULL)
12803 *nmatched = matched;
12804
12805 return (0);
12806 }
12807
12808 static void
12809 dtrace_enabling_matchall(void)
12810 {
12811 dtrace_enabling_t *enab;
12812
12813 mutex_enter(&cpu_lock);
12814 mutex_enter(&dtrace_lock);
12815
12816 /*
12817 * Iterate over all retained enablings to see if any probes match
12818 * against them. We only perform this operation on enablings for which
12819 * we have sufficient permissions by virtue of being in the global zone
12820 * or in the same zone as the DTrace client. Because we can be called
12821 * after dtrace_detach() has been called, we cannot assert that there
12822 * are retained enablings. We can safely load from dtrace_retained,
12823 * however: the taskq_destroy() at the end of dtrace_detach() will
12824 * block pending our completion.
12825 */
12826 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12827 #ifdef illumos
12828 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
12829
12830 if (INGLOBALZONE(curproc) ||
12831 (cr != NULL && getzoneid() == crgetzoneid(cr)))
12832 #endif
12833 (void) dtrace_enabling_match(enab, NULL);
12834 }
12835
12836 mutex_exit(&dtrace_lock);
12837 mutex_exit(&cpu_lock);
12838 }
12839
12840 /*
12841 * If an enabling is to be enabled without having matched probes (that is, if
12842 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12843 * enabling must be _primed_ by creating an ECB for every ECB description.
12844 * This must be done to assure that we know the number of speculations, the
12845 * number of aggregations, the minimum buffer size needed, etc. before we
12846 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
12847 * enabling any probes, we create ECBs for every ECB description, but with a
12848 * NULL probe -- which is exactly what this function does.
12849 */
12850 static void
12851 dtrace_enabling_prime(dtrace_state_t *state)
12852 {
12853 dtrace_enabling_t *enab;
12854 int i;
12855
12856 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12857 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12858
12859 if (enab->dten_vstate->dtvs_state != state)
12860 continue;
12861
12862 /*
12863 * We don't want to prime an enabling more than once, lest
12864 * we allow a malicious user to induce resource exhaustion.
12865 * (The ECBs that result from priming an enabling aren't
12866 * leaked -- but they also aren't deallocated until the
12867 * consumer state is destroyed.)
12868 */
12869 if (enab->dten_primed)
12870 continue;
12871
12872 for (i = 0; i < enab->dten_ndesc; i++) {
12873 enab->dten_current = enab->dten_desc[i];
12874 (void) dtrace_probe_enable(NULL, enab);
12875 }
12876
12877 enab->dten_primed = 1;
12878 }
12879 }
12880
12881 /*
12882 * Called to indicate that probes should be provided due to retained
12883 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12884 * must take an initial lap through the enablings, calling the dtps_provide()
12885 * entry point explicitly to allow for autocreated probes.
12886 */
12887 static void
12888 dtrace_enabling_provide(dtrace_provider_t *prv)
12889 {
12890 int i, all = 0;
12891 dtrace_probedesc_t desc;
12892 dtrace_genid_t gen;
12893
12894 ASSERT(MUTEX_HELD(&dtrace_lock));
12895 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12896
12897 if (prv == NULL) {
12898 all = 1;
12899 prv = dtrace_provider;
12900 }
12901
12902 do {
12903 dtrace_enabling_t *enab;
12904 void *parg = prv->dtpv_arg;
12905
12906 retry:
12907 gen = dtrace_retained_gen;
12908 for (enab = dtrace_retained; enab != NULL;
12909 enab = enab->dten_next) {
12910 for (i = 0; i < enab->dten_ndesc; i++) {
12911 desc = enab->dten_desc[i]->dted_probe;
12912 mutex_exit(&dtrace_lock);
12913 prv->dtpv_pops.dtps_provide(parg, &desc);
12914 mutex_enter(&dtrace_lock);
12915 /*
12916 * Process the retained enablings again if
12917 * they have changed while we weren't holding
12918 * dtrace_lock.
12919 */
12920 if (gen != dtrace_retained_gen)
12921 goto retry;
12922 }
12923 }
12924 } while (all && (prv = prv->dtpv_next) != NULL);
12925
12926 mutex_exit(&dtrace_lock);
12927 dtrace_probe_provide(NULL, all ? NULL : prv);
12928 mutex_enter(&dtrace_lock);
12929 }
12930
12931 /*
12932 * Called to reap ECBs that are attached to probes from defunct providers.
12933 */ 12934 static void 12935 dtrace_enabling_reap(void) 12936 { 12937 dtrace_provider_t *prov; 12938 dtrace_probe_t *probe; 12939 dtrace_ecb_t *ecb; 12940 hrtime_t when; 12941 int i; 12942 12943 mutex_enter(&cpu_lock); 12944 mutex_enter(&dtrace_lock); 12945 12946 for (i = 0; i < dtrace_nprobes; i++) { 12947 if ((probe = dtrace_probes[i]) == NULL) 12948 continue; 12949 12950 if (probe->dtpr_ecb == NULL) 12951 continue; 12952 12953 prov = probe->dtpr_provider; 12954 12955 if ((when = prov->dtpv_defunct) == 0) 12956 continue; 12957 12958 /* 12959 * We have ECBs on a defunct provider: we want to reap these 12960 * ECBs to allow the provider to unregister. The destruction 12961 * of these ECBs must be done carefully: if we destroy the ECB 12962 * and the consumer later wishes to consume an EPID that 12963 * corresponds to the destroyed ECB (and if the EPID metadata 12964 * has not been previously consumed), the consumer will abort 12965 * processing on the unknown EPID. To reduce (but not, sadly, 12966 * eliminate) the possibility of this, we will only destroy an 12967 * ECB for a defunct provider if, for the state that 12968 * corresponds to the ECB: 12969 * 12970 * (a) There is no speculative tracing (which can effectively 12971 * cache an EPID for an arbitrary amount of time). 12972 * 12973 * (b) The principal buffers have been switched twice since the 12974 * provider became defunct. 12975 * 12976 * (c) The aggregation buffers are of zero size or have been 12977 * switched twice since the provider became defunct. 12978 * 12979 * We use dts_speculates to determine (a) and call a function 12980 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 12981 * that as soon as we've been unable to destroy one of the ECBs 12982 * associated with the probe, we quit trying -- reaping is only 12983 * fruitful in as much as we can destroy all ECBs associated 12984 * with the defunct provider's probes. 12985 */ 12986 while ((ecb = probe->dtpr_ecb) != NULL) { 12987 dtrace_state_t *state = ecb->dte_state; 12988 dtrace_buffer_t *buf = state->dts_buffer; 12989 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 12990 12991 if (state->dts_speculates) 12992 break; 12993 12994 if (!dtrace_buffer_consumed(buf, when)) 12995 break; 12996 12997 if (!dtrace_buffer_consumed(aggbuf, when)) 12998 break; 12999 13000 dtrace_ecb_disable(ecb); 13001 ASSERT(probe->dtpr_ecb != ecb); 13002 dtrace_ecb_destroy(ecb); 13003 } 13004 } 13005 13006 mutex_exit(&dtrace_lock); 13007 mutex_exit(&cpu_lock); 13008 } 13009 13010 /* 13011 * DTrace DOF Functions 13012 */ 13013 /*ARGSUSED*/ 13014 static void 13015 dtrace_dof_error(dof_hdr_t *dof, const char *str) 13016 { 13017 if (dtrace_err_verbose) 13018 cmn_err(CE_WARN, "failed to process DOF: %s", str); 13019 13020 #ifdef DTRACE_ERRDEBUG 13021 dtrace_errdebug(str); 13022 #endif 13023 } 13024 13025 /* 13026 * Create DOF out of a currently enabled state. Right now, we only create 13027 * DOF containing the run-time options -- but this could be expanded to create 13028 * complete DOF representing the enabled state. 
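 *
 * The generated DOF has a fixed, minimal shape: a dof_hdr_t,
 * immediately followed by a single dof_sec_t describing a
 * DOF_SECT_OPTDESC section, followed (at the next 8-byte boundary)
 * by DTRACEOPT_MAX option descriptors. Schematically:
 *
 *	+-----------+-----------+-------------------------------+
 *	| dof_hdr_t | dof_sec_t | dof_optdesc_t x DTRACEOPT_MAX |
 *	+-----------+-----------+-------------------------------+
 *	0           dofh_secoff  sec->dofs_offset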
13029 */ 13030 static dof_hdr_t * 13031 dtrace_dof_create(dtrace_state_t *state) 13032 { 13033 dof_hdr_t *dof; 13034 dof_sec_t *sec; 13035 dof_optdesc_t *opt; 13036 int i, len = sizeof (dof_hdr_t) + 13037 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 13038 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13039 13040 ASSERT(MUTEX_HELD(&dtrace_lock)); 13041 13042 dof = kmem_zalloc(len, KM_SLEEP); 13043 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 13044 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 13045 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 13046 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 13047 13048 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 13049 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 13050 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 13051 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 13052 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 13053 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 13054 13055 dof->dofh_flags = 0; 13056 dof->dofh_hdrsize = sizeof (dof_hdr_t); 13057 dof->dofh_secsize = sizeof (dof_sec_t); 13058 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 13059 dof->dofh_secoff = sizeof (dof_hdr_t); 13060 dof->dofh_loadsz = len; 13061 dof->dofh_filesz = len; 13062 dof->dofh_pad = 0; 13063 13064 /* 13065 * Fill in the option section header... 13066 */ 13067 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 13068 sec->dofs_type = DOF_SECT_OPTDESC; 13069 sec->dofs_align = sizeof (uint64_t); 13070 sec->dofs_flags = DOF_SECF_LOAD; 13071 sec->dofs_entsize = sizeof (dof_optdesc_t); 13072 13073 opt = (dof_optdesc_t *)((uintptr_t)sec + 13074 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 13075 13076 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 13077 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 13078 13079 for (i = 0; i < DTRACEOPT_MAX; i++) { 13080 opt[i].dofo_option = i; 13081 opt[i].dofo_strtab = DOF_SECIDX_NONE; 13082 opt[i].dofo_value = state->dts_options[i]; 13083 } 13084 13085 return (dof); 13086 } 13087 13088 static dof_hdr_t * 13089 dtrace_dof_copyin(uintptr_t uarg, int *errp) 13090 { 13091 dof_hdr_t hdr, *dof; 13092 13093 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13094 13095 /* 13096 * First, we're going to copyin() the sizeof (dof_hdr_t). 13097 */ 13098 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 13099 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13100 *errp = EFAULT; 13101 return (NULL); 13102 } 13103 13104 /* 13105 * Now we'll allocate the entire DOF and copy it in -- provided 13106 * that the length isn't outrageous. 13107 */ 13108 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13109 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13110 *errp = E2BIG; 13111 return (NULL); 13112 } 13113 13114 if (hdr.dofh_loadsz < sizeof (hdr)) { 13115 dtrace_dof_error(&hdr, "invalid load size"); 13116 *errp = EINVAL; 13117 return (NULL); 13118 } 13119 13120 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 13121 13122 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 13123 dof->dofh_loadsz != hdr.dofh_loadsz) { 13124 kmem_free(dof, hdr.dofh_loadsz); 13125 *errp = EFAULT; 13126 return (NULL); 13127 } 13128 13129 return (dof); 13130 } 13131 13132 #ifdef __FreeBSD__ 13133 static dof_hdr_t * 13134 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp) 13135 { 13136 dof_hdr_t hdr, *dof; 13137 struct thread *td; 13138 size_t loadsz; 13139 13140 ASSERT(!MUTEX_HELD(&dtrace_lock)); 13141 13142 td = curthread; 13143 13144 /* 13145 * First, we're going to copyin() the sizeof (dof_hdr_t). 
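 *
 * Note that the DOF is read in two passes -- the header first, then
 * the full dofh_loadsz bytes -- and that the second pass re-checks
 * that dofh_loadsz still matches the header used to size the
 * allocation. The source address space can change between the two
 * reads; without the re-check, a racing writer could leave us with
 * a buffer whose claimed load size disagrees with its allocated
 * size.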
13146 */ 13147 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) { 13148 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13149 *errp = EFAULT; 13150 return (NULL); 13151 } 13152 13153 /* 13154 * Now we'll allocate the entire DOF and copy it in -- provided 13155 * that the length isn't outrageous. 13156 */ 13157 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13158 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13159 *errp = E2BIG; 13160 return (NULL); 13161 } 13162 loadsz = (size_t)hdr.dofh_loadsz; 13163 13164 if (loadsz < sizeof (hdr)) { 13165 dtrace_dof_error(&hdr, "invalid load size"); 13166 *errp = EINVAL; 13167 return (NULL); 13168 } 13169 13170 dof = kmem_alloc(loadsz, KM_SLEEP); 13171 13172 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz || 13173 dof->dofh_loadsz != loadsz) { 13174 kmem_free(dof, hdr.dofh_loadsz); 13175 *errp = EFAULT; 13176 return (NULL); 13177 } 13178 13179 return (dof); 13180 } 13181 13182 static __inline uchar_t 13183 dtrace_dof_char(char c) 13184 { 13185 13186 switch (c) { 13187 case '0': 13188 case '1': 13189 case '2': 13190 case '3': 13191 case '4': 13192 case '5': 13193 case '6': 13194 case '7': 13195 case '8': 13196 case '9': 13197 return (c - '0'); 13198 case 'A': 13199 case 'B': 13200 case 'C': 13201 case 'D': 13202 case 'E': 13203 case 'F': 13204 return (c - 'A' + 10); 13205 case 'a': 13206 case 'b': 13207 case 'c': 13208 case 'd': 13209 case 'e': 13210 case 'f': 13211 return (c - 'a' + 10); 13212 } 13213 /* Should not reach here. */ 13214 return (UCHAR_MAX); 13215 } 13216 #endif /* __FreeBSD__ */ 13217 13218 static dof_hdr_t * 13219 dtrace_dof_property(const char *name) 13220 { 13221 #ifdef __FreeBSD__ 13222 uint8_t *dofbuf; 13223 u_char *data, *eol; 13224 caddr_t doffile; 13225 size_t bytes, len, i; 13226 dof_hdr_t *dof; 13227 u_char c1, c2; 13228 13229 dof = NULL; 13230 13231 doffile = preload_search_by_type("dtrace_dof"); 13232 if (doffile == NULL) 13233 return (NULL); 13234 13235 data = preload_fetch_addr(doffile); 13236 len = preload_fetch_size(doffile); 13237 for (;;) { 13238 /* Look for the end of the line. All lines end in a newline. */ 13239 eol = memchr(data, '\n', len); 13240 if (eol == NULL) 13241 return (NULL); 13242 13243 if (strncmp(name, data, strlen(name)) == 0) 13244 break; 13245 13246 eol++; /* skip past the newline */ 13247 len -= eol - data; 13248 data = eol; 13249 } 13250 13251 /* We've found the data corresponding to the specified key. */ 13252 13253 data += strlen(name) + 1; /* skip past the '=' */ 13254 len = eol - data; 13255 bytes = len / 2; 13256 13257 if (bytes < sizeof(dof_hdr_t)) { 13258 dtrace_dof_error(NULL, "truncated header"); 13259 goto doferr; 13260 } 13261 13262 /* 13263 * Each byte is represented by the two ASCII characters in its hex 13264 * representation. 
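 *
 * For example (with hypothetical input): the two characters "4f"
 * decode as dtrace_dof_char('4') == 4 and dtrace_dof_char('f') == 15,
 * which the loop below reassembles as 4 * 16 + 15 == 0x4f.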
13265 */
13266 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK);
dof = (dof_hdr_t *)dofbuf; /* assign early so the "doferr" path frees the partially-decoded buffer */
13267 for (i = 0; i < bytes; i++) {
13268 c1 = dtrace_dof_char(data[i * 2]);
13269 c2 = dtrace_dof_char(data[i * 2 + 1]);
13270 if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) {
13271 dtrace_dof_error(NULL, "invalid hex char in DOF");
13272 goto doferr;
13273 }
13274 dofbuf[i] = c1 * 16 + c2;
13275 }
13276
13277
13278 if (bytes < dof->dofh_loadsz) {
13279 dtrace_dof_error(NULL, "truncated DOF");
13280 goto doferr;
13281 }
13282
13283 if (dof->dofh_loadsz >= dtrace_dof_maxsize) {
13284 dtrace_dof_error(NULL, "oversized DOF");
13285 goto doferr;
13286 }
13287
13288 return (dof);
13289
13290 doferr:
13291 free(dof, M_SOLARIS);
13292 return (NULL);
13293 #else /* __FreeBSD__ */
13294 uchar_t *buf;
13295 uint64_t loadsz;
13296 unsigned int len, i;
13297 dof_hdr_t *dof;
13298
13299 /*
13300 * Unfortunately, arrays of values in .conf files are always (and
13301 * only) interpreted to be integer arrays. We must read our DOF
13302 * as an integer array, and then squeeze it into a byte array.
13303 */
13304 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
13305 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
13306 return (NULL);
13307
13308 for (i = 0; i < len; i++)
13309 buf[i] = (uchar_t)(((int *)buf)[i]);
13310
13311 if (len < sizeof (dof_hdr_t)) {
13312 ddi_prop_free(buf);
13313 dtrace_dof_error(NULL, "truncated header");
13314 return (NULL);
13315 }
13316
13317 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
13318 ddi_prop_free(buf);
13319 dtrace_dof_error(NULL, "truncated DOF");
13320 return (NULL);
13321 }
13322
13323 if (loadsz >= dtrace_dof_maxsize) {
13324 ddi_prop_free(buf);
13325 dtrace_dof_error(NULL, "oversized DOF");
13326 return (NULL);
13327 }
13328
13329 dof = kmem_alloc(loadsz, KM_SLEEP);
13330 bcopy(buf, dof, loadsz);
13331 ddi_prop_free(buf);
13332
13333 return (dof);
13334 #endif /* !__FreeBSD__ */
13335 }
13336
13337 static void
13338 dtrace_dof_destroy(dof_hdr_t *dof)
13339 {
13340 kmem_free(dof, dof->dofh_loadsz);
13341 }
13342
13343 /*
13344 * Return the dof_sec_t pointer corresponding to a given section index. If the
13345 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
13346 * a type other than DOF_SECT_NONE is specified, the header is checked against
13347 * this type and NULL is returned if the types do not match.
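 *
 * A typical (illustrative) call, as made from dtrace_dof_probedesc()
 * below when chasing a string table reference:
 *
 *	dof_sec_t *strtab;
 *
 *	if ((strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB,
 *	    probe->dofp_strtab)) == NULL)
 *		return (NULL);	/* dtrace_dof_error() already called */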
13348 */ 13349 static dof_sec_t * 13350 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13351 { 13352 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13353 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13354 13355 if (i >= dof->dofh_secnum) { 13356 dtrace_dof_error(dof, "referenced section index is invalid"); 13357 return (NULL); 13358 } 13359 13360 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13361 dtrace_dof_error(dof, "referenced section is not loadable"); 13362 return (NULL); 13363 } 13364 13365 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13366 dtrace_dof_error(dof, "referenced section is the wrong type"); 13367 return (NULL); 13368 } 13369 13370 return (sec); 13371 } 13372 13373 static dtrace_probedesc_t * 13374 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13375 { 13376 dof_probedesc_t *probe; 13377 dof_sec_t *strtab; 13378 uintptr_t daddr = (uintptr_t)dof; 13379 uintptr_t str; 13380 size_t size; 13381 13382 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13383 dtrace_dof_error(dof, "invalid probe section"); 13384 return (NULL); 13385 } 13386 13387 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13388 dtrace_dof_error(dof, "bad alignment in probe description"); 13389 return (NULL); 13390 } 13391 13392 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13393 dtrace_dof_error(dof, "truncated probe description"); 13394 return (NULL); 13395 } 13396 13397 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13398 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13399 13400 if (strtab == NULL) 13401 return (NULL); 13402 13403 str = daddr + strtab->dofs_offset; 13404 size = strtab->dofs_size; 13405 13406 if (probe->dofp_provider >= strtab->dofs_size) { 13407 dtrace_dof_error(dof, "corrupt probe provider"); 13408 return (NULL); 13409 } 13410 13411 (void) strncpy(desc->dtpd_provider, 13412 (char *)(str + probe->dofp_provider), 13413 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13414 13415 if (probe->dofp_mod >= strtab->dofs_size) { 13416 dtrace_dof_error(dof, "corrupt probe module"); 13417 return (NULL); 13418 } 13419 13420 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13421 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13422 13423 if (probe->dofp_func >= strtab->dofs_size) { 13424 dtrace_dof_error(dof, "corrupt probe function"); 13425 return (NULL); 13426 } 13427 13428 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13429 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13430 13431 if (probe->dofp_name >= strtab->dofs_size) { 13432 dtrace_dof_error(dof, "corrupt probe name"); 13433 return (NULL); 13434 } 13435 13436 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13437 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13438 13439 return (desc); 13440 } 13441 13442 static dtrace_difo_t * 13443 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13444 cred_t *cr) 13445 { 13446 dtrace_difo_t *dp; 13447 size_t ttl = 0; 13448 dof_difohdr_t *dofd; 13449 uintptr_t daddr = (uintptr_t)dof; 13450 size_t max = dtrace_difo_maxsize; 13451 int i, l, n; 13452 13453 static const struct { 13454 int section; 13455 int bufoffs; 13456 int lenoffs; 13457 int entsize; 13458 int align; 13459 const char *msg; 13460 } difo[] = { 13461 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13462 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13463 sizeof (dif_instr_t), "multiple DIF sections" }, 
13464 13465 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13466 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13467 sizeof (uint64_t), "multiple integer tables" }, 13468 13469 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13470 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13471 sizeof (char), "multiple string tables" }, 13472 13473 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13474 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13475 sizeof (uint_t), "multiple variable tables" }, 13476 13477 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13478 }; 13479 13480 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13481 dtrace_dof_error(dof, "invalid DIFO header section"); 13482 return (NULL); 13483 } 13484 13485 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13486 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13487 return (NULL); 13488 } 13489 13490 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13491 sec->dofs_size % sizeof (dof_secidx_t)) { 13492 dtrace_dof_error(dof, "bad size in DIFO header"); 13493 return (NULL); 13494 } 13495 13496 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13497 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13498 13499 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13500 dp->dtdo_rtype = dofd->dofd_rtype; 13501 13502 for (l = 0; l < n; l++) { 13503 dof_sec_t *subsec; 13504 void **bufp; 13505 uint32_t *lenp; 13506 13507 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13508 dofd->dofd_links[l])) == NULL) 13509 goto err; /* invalid section link */ 13510 13511 if (ttl + subsec->dofs_size > max) { 13512 dtrace_dof_error(dof, "exceeds maximum size"); 13513 goto err; 13514 } 13515 13516 ttl += subsec->dofs_size; 13517 13518 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13519 if (subsec->dofs_type != difo[i].section) 13520 continue; 13521 13522 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13523 dtrace_dof_error(dof, "section not loaded"); 13524 goto err; 13525 } 13526 13527 if (subsec->dofs_align != difo[i].align) { 13528 dtrace_dof_error(dof, "bad alignment"); 13529 goto err; 13530 } 13531 13532 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13533 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13534 13535 if (*bufp != NULL) { 13536 dtrace_dof_error(dof, difo[i].msg); 13537 goto err; 13538 } 13539 13540 if (difo[i].entsize != subsec->dofs_entsize) { 13541 dtrace_dof_error(dof, "entry size mismatch"); 13542 goto err; 13543 } 13544 13545 if (subsec->dofs_entsize != 0 && 13546 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13547 dtrace_dof_error(dof, "corrupt entry size"); 13548 goto err; 13549 } 13550 13551 *lenp = subsec->dofs_size; 13552 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13553 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13554 *bufp, subsec->dofs_size); 13555 13556 if (subsec->dofs_entsize != 0) 13557 *lenp /= subsec->dofs_entsize; 13558 13559 break; 13560 } 13561 13562 /* 13563 * If we encounter a loadable DIFO sub-section that is not 13564 * known to us, assume this is a broken program and fail. 13565 */ 13566 if (difo[i].section == DOF_SECT_NONE && 13567 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13568 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13569 goto err; 13570 } 13571 } 13572 13573 if (dp->dtdo_buf == NULL) { 13574 /* 13575 * We can't have a DIF object without DIF text. 
13576 */
13577 dtrace_dof_error(dof, "missing DIF text");
13578 goto err;
13579 }
13580
13581 /*
13582 * Before we validate the DIF object, run through the variable table
13583 * looking for the strings -- if any of their sizes are zero, we'll
13584 * set their size to be the system-wide default string size. Note that
13585 * this should _not_ happen if the "strsize" option has been set --
13586 * in this case, the compiler should have set the size to reflect the
13587 * setting of the option.
13588 */
13589 for (i = 0; i < dp->dtdo_varlen; i++) {
13590 dtrace_difv_t *v = &dp->dtdo_vartab[i];
13591 dtrace_diftype_t *t = &v->dtdv_type;
13592
13593 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
13594 continue;
13595
13596 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
13597 t->dtdt_size = dtrace_strsize_default;
13598 }
13599
13600 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
13601 goto err;
13602
13603 dtrace_difo_init(dp, vstate);
13604 return (dp);
13605
13606 err:
13607 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
13608 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
13609 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
13610 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
13611
13612 kmem_free(dp, sizeof (dtrace_difo_t));
13613 return (NULL);
13614 }
13615
13616 static dtrace_predicate_t *
13617 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13618 cred_t *cr)
13619 {
13620 dtrace_difo_t *dp;
13621
13622 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
13623 return (NULL);
13624
13625 return (dtrace_predicate_create(dp));
13626 }
13627
13628 static dtrace_actdesc_t *
13629 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13630 cred_t *cr)
13631 {
13632 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
13633 dof_actdesc_t *desc;
13634 dof_sec_t *difosec;
13635 size_t offs;
13636 uintptr_t daddr = (uintptr_t)dof;
13637 uint64_t arg;
13638 dtrace_actkind_t kind;
13639
13640 if (sec->dofs_type != DOF_SECT_ACTDESC) {
13641 dtrace_dof_error(dof, "invalid action section");
13642 return (NULL);
13643 }
13644
13645 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
13646 dtrace_dof_error(dof, "truncated action description");
13647 return (NULL);
13648 }
13649
13650 if (sec->dofs_align != sizeof (uint64_t)) {
13651 dtrace_dof_error(dof, "bad alignment in action description");
13652 return (NULL);
13653 }
13654
13655 if (sec->dofs_size < sec->dofs_entsize) {
13656 dtrace_dof_error(dof, "section entry size exceeds total size");
13657 return (NULL);
13658 }
13659
13660 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13661 dtrace_dof_error(dof, "bad entry size in action description");
13662 return (NULL);
13663 }
13664
13665 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13666 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13667 return (NULL);
13668 }
13669
13670 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13671 desc = (dof_actdesc_t *)(daddr +
13672 (uintptr_t)sec->dofs_offset + offs);
13673 kind = (dtrace_actkind_t)desc->dofa_kind;
13674
13675 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13676 (kind != DTRACEACT_PRINTA ||
13677 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13678 (kind == DTRACEACT_DIFEXPR &&
13679 desc->dofa_strtab != DOF_SECIDX_NONE)) {
13680 dof_sec_t *strtab;
13681 char *str, *fmt;
13682 uint64_t i;
13683
13684 /*
13685 * The argument to these actions is an index into the
13686 * DOF string table. For printf()-like actions, this 13687 * is the format string. For print(), this is the 13688 * CTF type of the expression result. 13689 */ 13690 if ((strtab = dtrace_dof_sect(dof, 13691 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13692 goto err; 13693 13694 str = (char *)((uintptr_t)dof + 13695 (uintptr_t)strtab->dofs_offset); 13696 13697 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13698 if (str[i] == '\0') 13699 break; 13700 } 13701 13702 if (i >= strtab->dofs_size) { 13703 dtrace_dof_error(dof, "bogus format string"); 13704 goto err; 13705 } 13706 13707 if (i == desc->dofa_arg) { 13708 dtrace_dof_error(dof, "empty format string"); 13709 goto err; 13710 } 13711 13712 i -= desc->dofa_arg; 13713 fmt = kmem_alloc(i + 1, KM_SLEEP); 13714 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13715 arg = (uint64_t)(uintptr_t)fmt; 13716 } else { 13717 if (kind == DTRACEACT_PRINTA) { 13718 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13719 arg = 0; 13720 } else { 13721 arg = desc->dofa_arg; 13722 } 13723 } 13724 13725 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13726 desc->dofa_uarg, arg); 13727 13728 if (last != NULL) { 13729 last->dtad_next = act; 13730 } else { 13731 first = act; 13732 } 13733 13734 last = act; 13735 13736 if (desc->dofa_difo == DOF_SECIDX_NONE) 13737 continue; 13738 13739 if ((difosec = dtrace_dof_sect(dof, 13740 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13741 goto err; 13742 13743 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13744 13745 if (act->dtad_difo == NULL) 13746 goto err; 13747 } 13748 13749 ASSERT(first != NULL); 13750 return (first); 13751 13752 err: 13753 for (act = first; act != NULL; act = next) { 13754 next = act->dtad_next; 13755 dtrace_actdesc_release(act, vstate); 13756 } 13757 13758 return (NULL); 13759 } 13760 13761 static dtrace_ecbdesc_t * 13762 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13763 cred_t *cr) 13764 { 13765 dtrace_ecbdesc_t *ep; 13766 dof_ecbdesc_t *ecb; 13767 dtrace_probedesc_t *desc; 13768 dtrace_predicate_t *pred = NULL; 13769 13770 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13771 dtrace_dof_error(dof, "truncated ECB description"); 13772 return (NULL); 13773 } 13774 13775 if (sec->dofs_align != sizeof (uint64_t)) { 13776 dtrace_dof_error(dof, "bad alignment in ECB description"); 13777 return (NULL); 13778 } 13779 13780 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13781 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13782 13783 if (sec == NULL) 13784 return (NULL); 13785 13786 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13787 ep->dted_uarg = ecb->dofe_uarg; 13788 desc = &ep->dted_probe; 13789 13790 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13791 goto err; 13792 13793 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13794 if ((sec = dtrace_dof_sect(dof, 13795 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13796 goto err; 13797 13798 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13799 goto err; 13800 13801 ep->dted_pred.dtpdd_predicate = pred; 13802 } 13803 13804 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13805 if ((sec = dtrace_dof_sect(dof, 13806 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 13807 goto err; 13808 13809 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13810 13811 if (ep->dted_action == NULL) 13812 goto err; 13813 } 13814 13815 return (ep); 13816 13817 err: 13818 if (pred != NULL) 13819 dtrace_predicate_release(pred, vstate); 13820 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 13821 return (NULL); 13822 } 13823 13824 /* 13825 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13826 * specified DOF. At present, this amounts to simply adding 'ubase' to the 13827 * site of any user SETX relocations to account for load object base address. 13828 * In the future, if we need other relocations, this function can be extended. 13829 */ 13830 static int 13831 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 13832 { 13833 uintptr_t daddr = (uintptr_t)dof; 13834 dof_relohdr_t *dofr = 13835 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13836 dof_sec_t *ss, *rs, *ts; 13837 dof_relodesc_t *r; 13838 uint_t i, n; 13839 13840 if (sec->dofs_size < sizeof (dof_relohdr_t) || 13841 sec->dofs_align != sizeof (dof_secidx_t)) { 13842 dtrace_dof_error(dof, "invalid relocation header"); 13843 return (-1); 13844 } 13845 13846 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 13847 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 13848 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 13849 13850 if (ss == NULL || rs == NULL || ts == NULL) 13851 return (-1); /* dtrace_dof_error() has been called already */ 13852 13853 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 13854 rs->dofs_align != sizeof (uint64_t)) { 13855 dtrace_dof_error(dof, "invalid relocation section"); 13856 return (-1); 13857 } 13858 13859 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 13860 n = rs->dofs_size / rs->dofs_entsize; 13861 13862 for (i = 0; i < n; i++) { 13863 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 13864 13865 switch (r->dofr_type) { 13866 case DOF_RELO_NONE: 13867 break; 13868 case DOF_RELO_SETX: 13869 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 13870 sizeof (uint64_t) > ts->dofs_size) { 13871 dtrace_dof_error(dof, "bad relocation offset"); 13872 return (-1); 13873 } 13874 13875 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 13876 dtrace_dof_error(dof, "misaligned setx relo"); 13877 return (-1); 13878 } 13879 13880 *(uint64_t *)taddr += ubase; 13881 break; 13882 default: 13883 dtrace_dof_error(dof, "invalid relocation type"); 13884 return (-1); 13885 } 13886 13887 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 13888 } 13889 13890 return (0); 13891 } 13892 13893 /* 13894 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 13895 * header: it should be at the front of a memory region that is at least 13896 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 13897 * size. It need not be validated in any other way. 13898 */ 13899 static int 13900 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 13901 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 13902 { 13903 uint64_t len = dof->dofh_loadsz, seclen; 13904 uintptr_t daddr = (uintptr_t)dof; 13905 dtrace_ecbdesc_t *ep; 13906 dtrace_enabling_t *enab; 13907 uint_t i; 13908 13909 ASSERT(MUTEX_HELD(&dtrace_lock)); 13910 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 13911 13912 /* 13913 * Check the DOF header identification bytes. In addition to checking 13914 * valid settings, we also verify that unused bits/bytes are zeroed so 13915 * we can use them later without fear of regressing existing binaries. 
13916 */ 13917 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 13918 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 13919 dtrace_dof_error(dof, "DOF magic string mismatch"); 13920 return (-1); 13921 } 13922 13923 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 13924 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 13925 dtrace_dof_error(dof, "DOF has invalid data model"); 13926 return (-1); 13927 } 13928 13929 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 13930 dtrace_dof_error(dof, "DOF encoding mismatch"); 13931 return (-1); 13932 } 13933 13934 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13935 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 13936 dtrace_dof_error(dof, "DOF version mismatch"); 13937 return (-1); 13938 } 13939 13940 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 13941 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 13942 return (-1); 13943 } 13944 13945 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 13946 dtrace_dof_error(dof, "DOF uses too many integer registers"); 13947 return (-1); 13948 } 13949 13950 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 13951 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 13952 return (-1); 13953 } 13954 13955 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 13956 if (dof->dofh_ident[i] != 0) { 13957 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 13958 return (-1); 13959 } 13960 } 13961 13962 if (dof->dofh_flags & ~DOF_FL_VALID) { 13963 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 13964 return (-1); 13965 } 13966 13967 if (dof->dofh_secsize == 0) { 13968 dtrace_dof_error(dof, "zero section header size"); 13969 return (-1); 13970 } 13971 13972 /* 13973 * Check that the section headers don't exceed the amount of DOF 13974 * data. Note that we cast the section size and number of sections 13975 * to uint64_t's to prevent possible overflow in the multiplication. 13976 */ 13977 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 13978 13979 if (dof->dofh_secoff > len || seclen > len || 13980 dof->dofh_secoff + seclen > len) { 13981 dtrace_dof_error(dof, "truncated section headers"); 13982 return (-1); 13983 } 13984 13985 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 13986 dtrace_dof_error(dof, "misaligned section headers"); 13987 return (-1); 13988 } 13989 13990 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 13991 dtrace_dof_error(dof, "misaligned section size"); 13992 return (-1); 13993 } 13994 13995 /* 13996 * Take an initial pass through the section headers to be sure that 13997 * the headers don't have stray offsets. If the 'noprobes' flag is 13998 * set, do not permit sections relating to providers, probes, or args. 
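 *
 * (The bounds checks in the loop below are deliberately written as
 * three comparisons -- offset > len, size > len, and
 * offset + size > len -- because testing the sum alone could be
 * defeated by unsigned wraparound: with hypothetical 64-bit values
 * offset == 2^64 - 8 and size == 16, offset + size wraps to 8 and
 * would pass, while the individual comparisons catch it.)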
13999 */ 14000 for (i = 0; i < dof->dofh_secnum; i++) { 14001 dof_sec_t *sec = (dof_sec_t *)(daddr + 14002 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14003 14004 if (noprobes) { 14005 switch (sec->dofs_type) { 14006 case DOF_SECT_PROVIDER: 14007 case DOF_SECT_PROBES: 14008 case DOF_SECT_PRARGS: 14009 case DOF_SECT_PROFFS: 14010 dtrace_dof_error(dof, "illegal sections " 14011 "for enabling"); 14012 return (-1); 14013 } 14014 } 14015 14016 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 14017 !(sec->dofs_flags & DOF_SECF_LOAD)) { 14018 dtrace_dof_error(dof, "loadable section with load " 14019 "flag unset"); 14020 return (-1); 14021 } 14022 14023 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14024 continue; /* just ignore non-loadable sections */ 14025 14026 if (!ISP2(sec->dofs_align)) { 14027 dtrace_dof_error(dof, "bad section alignment"); 14028 return (-1); 14029 } 14030 14031 if (sec->dofs_offset & (sec->dofs_align - 1)) { 14032 dtrace_dof_error(dof, "misaligned section"); 14033 return (-1); 14034 } 14035 14036 if (sec->dofs_offset > len || sec->dofs_size > len || 14037 sec->dofs_offset + sec->dofs_size > len) { 14038 dtrace_dof_error(dof, "corrupt section header"); 14039 return (-1); 14040 } 14041 14042 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 14043 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 14044 dtrace_dof_error(dof, "non-terminating string table"); 14045 return (-1); 14046 } 14047 } 14048 14049 /* 14050 * Take a second pass through the sections and locate and perform any 14051 * relocations that are present. We do this after the first pass to 14052 * be sure that all sections have had their headers validated. 14053 */ 14054 for (i = 0; i < dof->dofh_secnum; i++) { 14055 dof_sec_t *sec = (dof_sec_t *)(daddr + 14056 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14057 14058 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 14059 continue; /* skip sections that are not loadable */ 14060 14061 switch (sec->dofs_type) { 14062 case DOF_SECT_URELHDR: 14063 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 14064 return (-1); 14065 break; 14066 } 14067 } 14068 14069 if ((enab = *enabp) == NULL) 14070 enab = *enabp = dtrace_enabling_create(vstate); 14071 14072 for (i = 0; i < dof->dofh_secnum; i++) { 14073 dof_sec_t *sec = (dof_sec_t *)(daddr + 14074 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14075 14076 if (sec->dofs_type != DOF_SECT_ECBDESC) 14077 continue; 14078 14079 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 14080 dtrace_enabling_destroy(enab); 14081 *enabp = NULL; 14082 return (-1); 14083 } 14084 14085 dtrace_enabling_add(enab, ep); 14086 } 14087 14088 return (0); 14089 } 14090 14091 /* 14092 * Process DOF for any options. This routine assumes that the DOF has been 14093 * at least processed by dtrace_dof_slurp(). 
14094 */ 14095 static int 14096 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 14097 { 14098 int i, rval; 14099 uint32_t entsize; 14100 size_t offs; 14101 dof_optdesc_t *desc; 14102 14103 for (i = 0; i < dof->dofh_secnum; i++) { 14104 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 14105 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 14106 14107 if (sec->dofs_type != DOF_SECT_OPTDESC) 14108 continue; 14109 14110 if (sec->dofs_align != sizeof (uint64_t)) { 14111 dtrace_dof_error(dof, "bad alignment in " 14112 "option description"); 14113 return (EINVAL); 14114 } 14115 14116 if ((entsize = sec->dofs_entsize) == 0) { 14117 dtrace_dof_error(dof, "zeroed option entry size"); 14118 return (EINVAL); 14119 } 14120 14121 if (entsize < sizeof (dof_optdesc_t)) { 14122 dtrace_dof_error(dof, "bad option entry size"); 14123 return (EINVAL); 14124 } 14125 14126 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 14127 desc = (dof_optdesc_t *)((uintptr_t)dof + 14128 (uintptr_t)sec->dofs_offset + offs); 14129 14130 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 14131 dtrace_dof_error(dof, "non-zero option string"); 14132 return (EINVAL); 14133 } 14134 14135 if (desc->dofo_value == DTRACEOPT_UNSET) { 14136 dtrace_dof_error(dof, "unset option"); 14137 return (EINVAL); 14138 } 14139 14140 if ((rval = dtrace_state_option(state, 14141 desc->dofo_option, desc->dofo_value)) != 0) { 14142 dtrace_dof_error(dof, "rejected option"); 14143 return (rval); 14144 } 14145 } 14146 } 14147 14148 return (0); 14149 } 14150 14151 /* 14152 * DTrace Consumer State Functions 14153 */ 14154 static int 14155 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 14156 { 14157 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 14158 void *base; 14159 uintptr_t limit; 14160 dtrace_dynvar_t *dvar, *next, *start; 14161 int i; 14162 14163 ASSERT(MUTEX_HELD(&dtrace_lock)); 14164 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 14165 14166 bzero(dstate, sizeof (dtrace_dstate_t)); 14167 14168 if ((dstate->dtds_chunksize = chunksize) == 0) 14169 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 14170 14171 VERIFY(dstate->dtds_chunksize < LONG_MAX); 14172 14173 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 14174 size = min; 14175 14176 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 14177 return (ENOMEM); 14178 14179 dstate->dtds_size = size; 14180 dstate->dtds_base = base; 14181 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 14182 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 14183 14184 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 14185 14186 if (hashsize != 1 && (hashsize & 1)) 14187 hashsize--; 14188 14189 dstate->dtds_hashsize = hashsize; 14190 dstate->dtds_hash = dstate->dtds_base; 14191 14192 /* 14193 * Set all of our hash buckets to point to the single sink, and (if 14194 * it hasn't already been set), set the sink's hash value to be the 14195 * sink sentinel value. The sink is needed for dynamic variable 14196 * lookups to know that they have iterated over an entire, valid hash 14197 * chain. 14198 */ 14199 for (i = 0; i < hashsize; i++) 14200 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 14201 14202 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 14203 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 14204 14205 /* 14206 * Determine number of active CPUs. Divide free list evenly among 14207 * active CPUs. 
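 *
 * To illustrate with hypothetical numbers: with size = 1MB,
 * chunksize = 256 and a 16-byte dtrace_dynhash_t, hashsize is
 * 1MB / 272, trimmed (if necessary) to be either 1 or even. The
 * hash buckets occupy the front of the allocation; the remainder is
 * carved into per-CPU free lists of maxper bytes apiece (the last
 * CPU taking any remainder), with maxper rounded down to a multiple
 * of the chunk size so that no chunk straddles two CPUs' lists.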
14208 */ 14209 start = (dtrace_dynvar_t *) 14210 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 14211 limit = (uintptr_t)base + size; 14212 14213 VERIFY((uintptr_t)start < limit); 14214 VERIFY((uintptr_t)start >= (uintptr_t)base); 14215 14216 maxper = (limit - (uintptr_t)start) / NCPU; 14217 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 14218 14219 #ifndef illumos 14220 CPU_FOREACH(i) { 14221 #else 14222 for (i = 0; i < NCPU; i++) { 14223 #endif 14224 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 14225 14226 /* 14227 * If we don't even have enough chunks to make it once through 14228 * NCPUs, we're just going to allocate everything to the first 14229 * CPU. And if we're on the last CPU, we're going to allocate 14230 * whatever is left over. In either case, we set the limit to 14231 * be the limit of the dynamic variable space. 14232 */ 14233 if (maxper == 0 || i == NCPU - 1) { 14234 limit = (uintptr_t)base + size; 14235 start = NULL; 14236 } else { 14237 limit = (uintptr_t)start + maxper; 14238 start = (dtrace_dynvar_t *)limit; 14239 } 14240 14241 VERIFY(limit <= (uintptr_t)base + size); 14242 14243 for (;;) { 14244 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 14245 dstate->dtds_chunksize); 14246 14247 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 14248 break; 14249 14250 VERIFY((uintptr_t)dvar >= (uintptr_t)base && 14251 (uintptr_t)dvar <= (uintptr_t)base + size); 14252 dvar->dtdv_next = next; 14253 dvar = next; 14254 } 14255 14256 if (maxper == 0) 14257 break; 14258 } 14259 14260 return (0); 14261 } 14262 14263 static void 14264 dtrace_dstate_fini(dtrace_dstate_t *dstate) 14265 { 14266 ASSERT(MUTEX_HELD(&cpu_lock)); 14267 14268 if (dstate->dtds_base == NULL) 14269 return; 14270 14271 kmem_free(dstate->dtds_base, dstate->dtds_size); 14272 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14273 } 14274 14275 static void 14276 dtrace_vstate_fini(dtrace_vstate_t *vstate) 14277 { 14278 /* 14279 * Logical XOR, where are you? 14280 */ 14281 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14282 14283 if (vstate->dtvs_nglobals > 0) { 14284 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14285 sizeof (dtrace_statvar_t *)); 14286 } 14287 14288 if (vstate->dtvs_ntlocals > 0) { 14289 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14290 sizeof (dtrace_difv_t)); 14291 } 14292 14293 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14294 14295 if (vstate->dtvs_nlocals > 0) { 14296 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14297 sizeof (dtrace_statvar_t *)); 14298 } 14299 } 14300 14301 #ifdef illumos 14302 static void 14303 dtrace_state_clean(dtrace_state_t *state) 14304 { 14305 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14306 return; 14307 14308 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14309 dtrace_speculation_clean(state); 14310 } 14311 14312 static void 14313 dtrace_state_deadman(dtrace_state_t *state) 14314 { 14315 hrtime_t now; 14316 14317 dtrace_sync(); 14318 14319 now = dtrace_gethrtime(); 14320 14321 if (state != dtrace_anon.dta_state && 14322 now - state->dts_laststatus >= dtrace_deadman_user) 14323 return; 14324 14325 /* 14326 * We must be sure that dts_alive never appears to be less than the 14327 * value upon entry to dtrace_state_deadman(), and because we lack a 14328 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14329 * store INT64_MAX to it, followed by a memory barrier, followed by 14330 * the new value. 
This assures that dts_alive never appears to be 14331 * less than its true value, regardless of the order in which the 14332 * stores to the underlying storage are issued. 14333 */ 14334 state->dts_alive = INT64_MAX; 14335 dtrace_membar_producer(); 14336 state->dts_alive = now; 14337 } 14338 #else /* !illumos */ 14339 static void 14340 dtrace_state_clean(void *arg) 14341 { 14342 dtrace_state_t *state = arg; 14343 dtrace_optval_t *opt = state->dts_options; 14344 14345 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14346 return; 14347 14348 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14349 dtrace_speculation_clean(state); 14350 14351 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14352 dtrace_state_clean, state); 14353 } 14354 14355 static void 14356 dtrace_state_deadman(void *arg) 14357 { 14358 dtrace_state_t *state = arg; 14359 hrtime_t now; 14360 14361 dtrace_sync(); 14362 14363 dtrace_debug_output(); 14364 14365 now = dtrace_gethrtime(); 14366 14367 if (state != dtrace_anon.dta_state && 14368 now - state->dts_laststatus >= dtrace_deadman_user) 14369 return; 14370 14371 /* 14372 * We must be sure that dts_alive never appears to be less than the 14373 * value upon entry to dtrace_state_deadman(), and because we lack a 14374 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14375 * store INT64_MAX to it, followed by a memory barrier, followed by 14376 * the new value. This assures that dts_alive never appears to be 14377 * less than its true value, regardless of the order in which the 14378 * stores to the underlying storage are issued. 14379 */ 14380 state->dts_alive = INT64_MAX; 14381 dtrace_membar_producer(); 14382 state->dts_alive = now; 14383 14384 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14385 dtrace_state_deadman, state); 14386 } 14387 #endif /* illumos */ 14388 14389 static dtrace_state_t * 14390 #ifdef illumos 14391 dtrace_state_create(dev_t *devp, cred_t *cr) 14392 #else 14393 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused) 14394 #endif 14395 { 14396 #ifdef illumos 14397 minor_t minor; 14398 major_t major; 14399 #else 14400 cred_t *cr = NULL; 14401 int m = 0; 14402 #endif 14403 char c[30]; 14404 dtrace_state_t *state; 14405 dtrace_optval_t *opt; 14406 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14407 14408 ASSERT(MUTEX_HELD(&dtrace_lock)); 14409 ASSERT(MUTEX_HELD(&cpu_lock)); 14410 14411 #ifdef illumos 14412 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14413 VM_BESTFIT | VM_SLEEP); 14414 14415 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14416 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14417 return (NULL); 14418 } 14419 14420 state = ddi_get_soft_state(dtrace_softstate, minor); 14421 #else 14422 if (dev != NULL) { 14423 cr = dev->si_cred; 14424 m = dev2unit(dev); 14425 } 14426 14427 /* Allocate memory for the state. 
*/ 14428 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14429 #endif 14430 14431 state->dts_epid = DTRACE_EPIDNONE + 1; 14432 14433 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14434 #ifdef illumos 14435 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14436 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14437 14438 if (devp != NULL) { 14439 major = getemajor(*devp); 14440 } else { 14441 major = ddi_driver_major(dtrace_devi); 14442 } 14443 14444 state->dts_dev = makedevice(major, minor); 14445 14446 if (devp != NULL) 14447 *devp = state->dts_dev; 14448 #else 14449 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14450 state->dts_dev = dev; 14451 #endif 14452 14453 /* 14454 * We allocate NCPU buffers. On the one hand, this can be quite 14455 * a bit of memory per instance (nearly 36K on a Starcat). On the 14456 * other hand, it saves an additional memory reference in the probe 14457 * path. 14458 */ 14459 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14460 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14461 14462 #ifdef illumos 14463 state->dts_cleaner = CYCLIC_NONE; 14464 state->dts_deadman = CYCLIC_NONE; 14465 #else 14466 callout_init(&state->dts_cleaner, 1); 14467 callout_init(&state->dts_deadman, 1); 14468 #endif 14469 state->dts_vstate.dtvs_state = state; 14470 14471 for (i = 0; i < DTRACEOPT_MAX; i++) 14472 state->dts_options[i] = DTRACEOPT_UNSET; 14473 14474 /* 14475 * Set the default options. 14476 */ 14477 opt = state->dts_options; 14478 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14479 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14480 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14481 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14482 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14483 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14484 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14485 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14486 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14487 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14488 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14489 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14490 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14491 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14492 14493 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14494 14495 /* 14496 * Depending on the user credentials, we set flag bits which alter probe 14497 * visibility or the amount of destructiveness allowed. In the case of 14498 * actual anonymous tracing, or the possession of all privileges, all of 14499 * the normal checks are bypassed. 14500 */ 14501 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14502 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14503 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14504 } else { 14505 /* 14506 * Set up the credentials for this instantiation. We take a 14507 * hold on the credential to prevent it from disappearing on 14508 * us; this in turn prevents the zone_t referenced by this 14509 * credential from disappearing. This means that we can 14510 * examine the credential and the zone from probe context. 14511 */ 14512 crhold(cr); 14513 state->dts_cred.dcr_cred = cr; 14514 14515 /* 14516 * CRA_PROC means "we have *some* privilege for dtrace" and 14517 * unlocks the use of variables like pid, zonename, etc. 
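 *
 * As a rough map of the checks that follow (the code below is
 * authoritative):
 *
 *	dtrace_proc or dtrace_user  -> DTRACE_CRA_PROC
 *	dtrace_user + proc_owner    -> visibility of all of this zone's
 *	                               processes, destructive actions on
 *	                               other users' processes
 *	dtrace_user + proc_zone     -> likewise, extended across zones
 *	dtrace_kernel               -> all probes visible in all zones,
 *	                               plus DTRACE_CRA_KERNEL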
14518 */ 14519 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 14520 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 14521 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 14522 } 14523 14524 /* 14525 * dtrace_user allows use of syscall and profile providers. 14526 * If the user also has proc_owner and/or proc_zone, we 14527 * extend the scope to include additional visibility and 14528 * destructive power. 14529 */ 14530 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 14531 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 14532 state->dts_cred.dcr_visible |= 14533 DTRACE_CRV_ALLPROC; 14534 14535 state->dts_cred.dcr_action |= 14536 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14537 } 14538 14539 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 14540 state->dts_cred.dcr_visible |= 14541 DTRACE_CRV_ALLZONE; 14542 14543 state->dts_cred.dcr_action |= 14544 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14545 } 14546 14547 /* 14548 * If we have all privs in whatever zone this is, 14549 * we can do destructive things to processes which 14550 * have altered credentials. 14551 */ 14552 #ifdef illumos 14553 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14554 cr->cr_zone->zone_privset)) { 14555 state->dts_cred.dcr_action |= 14556 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14557 } 14558 #endif 14559 } 14560 14561 /* 14562 * Holding the dtrace_kernel privilege also implies that 14563 * the user has the dtrace_user privilege from a visibility 14564 * perspective. But without further privileges, some 14565 * destructive actions are not available. 14566 */ 14567 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 14568 /* 14569 * Make all probes in all zones visible. However, 14570 * this doesn't mean that all actions become available 14571 * to all zones. 14572 */ 14573 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 14574 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 14575 14576 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 14577 DTRACE_CRA_PROC; 14578 /* 14579 * Holding proc_owner means that destructive actions 14580 * for *this* zone are allowed. 14581 */ 14582 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 14583 state->dts_cred.dcr_action |= 14584 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 14585 14586 /* 14587 * Holding proc_zone means that destructive actions 14588 * for this user/group ID in all zones is allowed. 14589 */ 14590 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 14591 state->dts_cred.dcr_action |= 14592 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 14593 14594 #ifdef illumos 14595 /* 14596 * If we have all privs in whatever zone this is, 14597 * we can do destructive things to processes which 14598 * have altered credentials. 14599 */ 14600 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 14601 cr->cr_zone->zone_privset)) { 14602 state->dts_cred.dcr_action |= 14603 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 14604 } 14605 #endif 14606 } 14607 14608 /* 14609 * Holding the dtrace_proc privilege gives control over fasttrap 14610 * and pid providers. We need to grant wider destructive 14611 * privileges in the event that the user has proc_owner and/or 14612 * proc_zone. 
14613 		 */
14614 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14615 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14616 				state->dts_cred.dcr_action |=
14617 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14618
14619 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14620 				state->dts_cred.dcr_action |=
14621 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14622 		}
14623 	}
14624
14625 	return (state);
14626 }
14627
14628 static int
14629 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
14630 {
14631 	dtrace_optval_t *opt = state->dts_options, size;
14632 	processorid_t cpu = 0;
14633 	int flags = 0, rval, factor, divisor = 1;
14634
14635 	ASSERT(MUTEX_HELD(&dtrace_lock));
14636 	ASSERT(MUTEX_HELD(&cpu_lock));
14637 	ASSERT(which < DTRACEOPT_MAX);
14638 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
14639 	    (state == dtrace_anon.dta_state &&
14640 	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
14641
14642 	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
14643 		return (0);
14644
14645 	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
14646 		cpu = opt[DTRACEOPT_CPU];
14647
14648 	if (which == DTRACEOPT_SPECSIZE)
14649 		flags |= DTRACEBUF_NOSWITCH;
14650
14651 	if (which == DTRACEOPT_BUFSIZE) {
14652 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
14653 			flags |= DTRACEBUF_RING;
14654
14655 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
14656 			flags |= DTRACEBUF_FILL;
14657
14658 		if (state != dtrace_anon.dta_state ||
14659 		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14660 			flags |= DTRACEBUF_INACTIVE;
14661 	}
14662
14663 	for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
14664 		/*
14665 		 * The size must be 8-byte aligned. If the size is not 8-byte
14666 		 * aligned, drop it down by the difference.
14667 		 */
14668 		if (size & (sizeof (uint64_t) - 1))
14669 			size -= size & (sizeof (uint64_t) - 1);
14670
14671 		if (size < state->dts_reserve) {
14672 			/*
14673 			 * Buffers always must be large enough to accommodate
14674 			 * their prereserved space. We return E2BIG instead
14675 			 * of ENOMEM in this case to allow for user-level
14676 			 * software to differentiate the cases.
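 *
 * (Concretely: a request of 65537 bytes is first aligned down
 * to 65536.  Should dtrace_buffer_alloc() below fail with
 * ENOMEM and report, say, factor = 3, the divisor loop picks
 * 4 -- the smallest power of two >= factor -- and the next
 * pass retries with 16384 bytes, continuing until either an
 * allocation succeeds or the size drops beneath the reserve
 * and this E2BIG path is taken.)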
14677 			 */
14678 			return (E2BIG);
14679 		}
14680
14681 		rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
14682
14683 		if (rval != ENOMEM) {
14684 			opt[which] = size;
14685 			return (rval);
14686 		}
14687
14688 		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14689 			return (rval);
14690
14691 		for (divisor = 2; divisor < factor; divisor <<= 1)
14692 			continue;
14693 	}
14694
14695 	return (ENOMEM);
14696 }
14697
14698 static int
14699 dtrace_state_buffers(dtrace_state_t *state)
14700 {
14701 	dtrace_speculation_t *spec = state->dts_speculations;
14702 	int rval, i;
14703
14704 	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
14705 	    DTRACEOPT_BUFSIZE)) != 0)
14706 		return (rval);
14707
14708 	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
14709 	    DTRACEOPT_AGGSIZE)) != 0)
14710 		return (rval);
14711
14712 	for (i = 0; i < state->dts_nspeculations; i++) {
14713 		if ((rval = dtrace_state_buffer(state,
14714 		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
14715 			return (rval);
14716 	}
14717
14718 	return (0);
14719 }
14720
14721 static void
14722 dtrace_state_prereserve(dtrace_state_t *state)
14723 {
14724 	dtrace_ecb_t *ecb;
14725 	dtrace_probe_t *probe;
14726
14727 	state->dts_reserve = 0;
14728
14729 	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
14730 		return;
14731
14732 	/*
14733 	 * If our buffer policy is a "fill" buffer policy, we need to set the
14734 	 * prereserved space to be the space required by the END probes.
14735 	 */
14736 	probe = dtrace_probes[dtrace_probeid_end - 1];
14737 	ASSERT(probe != NULL);
14738
14739 	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14740 		if (ecb->dte_state != state)
14741 			continue;
14742
14743 		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14744 	}
14745 }
14746
14747 static int
14748 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14749 {
14750 	dtrace_optval_t *opt = state->dts_options, sz, nspec;
14751 	dtrace_speculation_t *spec;
14752 	dtrace_buffer_t *buf;
14753 #ifdef illumos
14754 	cyc_handler_t hdlr;
14755 	cyc_time_t when;
14756 #endif
14757 	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14758 	dtrace_icookie_t cookie;
14759
14760 	mutex_enter(&cpu_lock);
14761 	mutex_enter(&dtrace_lock);
14762
14763 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14764 		rval = EBUSY;
14765 		goto out;
14766 	}
14767
14768 	/*
14769 	 * Before we can perform any checks, we must prime all of the
14770 	 * retained enablings that correspond to this state.
14771 	 */
14772 	dtrace_enabling_prime(state);
14773
14774 	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14775 		rval = EACCES;
14776 		goto out;
14777 	}
14778
14779 	dtrace_state_prereserve(state);
14780
14781 	/*
14782 	 * Now we want to try to allocate our speculations.
14783 	 * We do not automatically resize the number of speculations; if
14784 	 * this fails, we will fail the operation.
14785 */ 14786 nspec = opt[DTRACEOPT_NSPEC]; 14787 ASSERT(nspec != DTRACEOPT_UNSET); 14788 14789 if (nspec > INT_MAX) { 14790 rval = ENOMEM; 14791 goto out; 14792 } 14793 14794 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14795 KM_NOSLEEP | KM_NORMALPRI); 14796 14797 if (spec == NULL) { 14798 rval = ENOMEM; 14799 goto out; 14800 } 14801 14802 state->dts_speculations = spec; 14803 state->dts_nspeculations = (int)nspec; 14804 14805 for (i = 0; i < nspec; i++) { 14806 if ((buf = kmem_zalloc(bufsize, 14807 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 14808 rval = ENOMEM; 14809 goto err; 14810 } 14811 14812 spec[i].dtsp_buffer = buf; 14813 } 14814 14815 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 14816 if (dtrace_anon.dta_state == NULL) { 14817 rval = ENOENT; 14818 goto out; 14819 } 14820 14821 if (state->dts_necbs != 0) { 14822 rval = EALREADY; 14823 goto out; 14824 } 14825 14826 state->dts_anon = dtrace_anon_grab(); 14827 ASSERT(state->dts_anon != NULL); 14828 state = state->dts_anon; 14829 14830 /* 14831 * We want "grabanon" to be set in the grabbed state, so we'll 14832 * copy that option value from the grabbing state into the 14833 * grabbed state. 14834 */ 14835 state->dts_options[DTRACEOPT_GRABANON] = 14836 opt[DTRACEOPT_GRABANON]; 14837 14838 *cpu = dtrace_anon.dta_beganon; 14839 14840 /* 14841 * If the anonymous state is active (as it almost certainly 14842 * is if the anonymous enabling ultimately matched anything), 14843 * we don't allow any further option processing -- but we 14844 * don't return failure. 14845 */ 14846 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 14847 goto out; 14848 } 14849 14850 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 14851 opt[DTRACEOPT_AGGSIZE] != 0) { 14852 if (state->dts_aggregations == NULL) { 14853 /* 14854 * We're not going to create an aggregation buffer 14855 * because we don't have any ECBs that contain 14856 * aggregations -- set this option to 0. 14857 */ 14858 opt[DTRACEOPT_AGGSIZE] = 0; 14859 } else { 14860 /* 14861 * If we have an aggregation buffer, we must also have 14862 * a buffer to use as scratch. 14863 */ 14864 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 14865 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 14866 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 14867 } 14868 } 14869 } 14870 14871 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 14872 opt[DTRACEOPT_SPECSIZE] != 0) { 14873 if (!state->dts_speculates) { 14874 /* 14875 * We're not going to create speculation buffers 14876 * because we don't have any ECBs that actually 14877 * speculate -- set the speculation size to 0. 14878 */ 14879 opt[DTRACEOPT_SPECSIZE] = 0; 14880 } 14881 } 14882 14883 /* 14884 * The bare minimum size for any buffer that we're actually going to 14885 * do anything to is sizeof (uint64_t). 14886 */ 14887 sz = sizeof (uint64_t); 14888 14889 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 14890 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 14891 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 14892 /* 14893 * A buffer size has been explicitly set to 0 (or to a size 14894 * that will be adjusted to 0) and we need the space -- we 14895 * need to return failure. We return ENOSPC to differentiate 14896 * it from failing to allocate a buffer due to failure to meet 14897 * the reserve (for which we return E2BIG). 
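 *
 * Summarizing the three buffer-sizing failure modes: E2BIG means a
 * positive size fell below the "fill" policy reserve; ENOMEM means
 * allocation failed even after any permitted halving; ENOSPC (here)
 * means a buffer we actually need was configured down to zero.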
14898 */ 14899 rval = ENOSPC; 14900 goto out; 14901 } 14902 14903 if ((rval = dtrace_state_buffers(state)) != 0) 14904 goto err; 14905 14906 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 14907 sz = dtrace_dstate_defsize; 14908 14909 do { 14910 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 14911 14912 if (rval == 0) 14913 break; 14914 14915 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14916 goto err; 14917 } while (sz >>= 1); 14918 14919 opt[DTRACEOPT_DYNVARSIZE] = sz; 14920 14921 if (rval != 0) 14922 goto err; 14923 14924 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 14925 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 14926 14927 if (opt[DTRACEOPT_CLEANRATE] == 0) 14928 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14929 14930 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 14931 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 14932 14933 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 14934 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14935 14936 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 14937 #ifdef illumos 14938 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 14939 hdlr.cyh_arg = state; 14940 hdlr.cyh_level = CY_LOW_LEVEL; 14941 14942 when.cyt_when = 0; 14943 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 14944 14945 state->dts_cleaner = cyclic_add(&hdlr, &when); 14946 14947 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 14948 hdlr.cyh_arg = state; 14949 hdlr.cyh_level = CY_LOW_LEVEL; 14950 14951 when.cyt_when = 0; 14952 when.cyt_interval = dtrace_deadman_interval; 14953 14954 state->dts_deadman = cyclic_add(&hdlr, &when); 14955 #else 14956 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14957 dtrace_state_clean, state); 14958 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14959 dtrace_state_deadman, state); 14960 #endif 14961 14962 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 14963 14964 #ifdef illumos 14965 if (state->dts_getf != 0 && 14966 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 14967 /* 14968 * We don't have kernel privs but we have at least one call 14969 * to getf(); we need to bump our zone's count, and (if 14970 * this is the first enabling to have an unprivileged call 14971 * to getf()) we need to hook into closef(). 14972 */ 14973 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 14974 14975 if (dtrace_getf++ == 0) { 14976 ASSERT(dtrace_closef == NULL); 14977 dtrace_closef = dtrace_getf_barrier; 14978 } 14979 } 14980 #endif 14981 14982 /* 14983 * Now it's time to actually fire the BEGIN probe. We need to disable 14984 * interrupts here both to record the CPU on which we fired the BEGIN 14985 * probe (the data from this CPU will be processed first at user 14986 * level) and to manually activate the buffer for this CPU. 14987 */ 14988 cookie = dtrace_interrupt_disable(); 14989 *cpu = curcpu; 14990 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 14991 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 14992 14993 dtrace_probe(dtrace_probeid_begin, 14994 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 14995 dtrace_interrupt_enable(cookie); 14996 /* 14997 * We may have had an exit action from a BEGIN probe; only change our 14998 * state to ACTIVE if we're still in WARMUP. 
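 *
 * (For example, a trivial enabling such as
 *
 *	BEGIN { exit(0); }
 *
 * runs its exit() action inside the dtrace_probe() call above, so by
 * the time we get here the state has already moved from WARMUP to
 * DRAINING.)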
14999 	 */
15000 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
15001 	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
15002
15003 	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
15004 		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
15005
15006 #ifdef __FreeBSD__
15007 	/*
15008 	 * We enable anonymous tracing before APs are started, so we must
15009 	 * activate buffers using the current CPU.
15010 	 */
15011 	if (state == dtrace_anon.dta_state)
15012 		for (int i = 0; i < NCPU; i++)
15013 			dtrace_buffer_activate_cpu(state, i);
15014 	else
15015 		dtrace_xcall(DTRACE_CPUALL,
15016 		    (dtrace_xcall_t)dtrace_buffer_activate, state);
15017 #else
15018 	/*
15019 	 * Regardless of whether we're now in ACTIVE or DRAINING, we
15020 	 * want each CPU to transition its principal buffer out of the
15021 	 * INACTIVE state. Doing this assures that no CPU will suddenly begin
15022 	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
15023 	 * atomically transition from processing none of a state's ECBs to
15024 	 * processing all of them.
15025 	 */
15026 	dtrace_xcall(DTRACE_CPUALL,
15027 	    (dtrace_xcall_t)dtrace_buffer_activate, state);
15028 #endif
15029 	goto out;
15030
15031 err:
15032 	dtrace_buffer_free(state->dts_buffer);
15033 	dtrace_buffer_free(state->dts_aggbuffer);
15034
15035 	if ((nspec = state->dts_nspeculations) == 0) {
15036 		ASSERT(state->dts_speculations == NULL);
15037 		goto out;
15038 	}
15039
15040 	spec = state->dts_speculations;
15041 	ASSERT(spec != NULL);
15042
15043 	for (i = 0; i < state->dts_nspeculations; i++) {
15044 		if ((buf = spec[i].dtsp_buffer) == NULL)
15045 			break;
15046
15047 		dtrace_buffer_free(buf);
15048 		kmem_free(buf, bufsize);
15049 	}
15050
15051 	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15052 	state->dts_nspeculations = 0;
15053 	state->dts_speculations = NULL;
15054
15055 out:
15056 	mutex_exit(&dtrace_lock);
15057 	mutex_exit(&cpu_lock);
15058
15059 	return (rval);
15060 }
15061
15062 static int
15063 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
15064 {
15065 	dtrace_icookie_t cookie;
15066
15067 	ASSERT(MUTEX_HELD(&dtrace_lock));
15068
15069 	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
15070 	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
15071 		return (EINVAL);
15072
15073 	/*
15074 	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
15075 	 * to be sure that every CPU has seen it. See below for the details
15076 	 * on why this is done.
15077 	 */
15078 	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
15079 	dtrace_sync();
15080
15081 	/*
15082 	 * By this point, it is impossible for any CPU to be still processing
15083 	 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
15084 	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
15085 	 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
15086 	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
15087 	 * iff we're in the END probe.
15088 	 */
15089 	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
15090 	dtrace_sync();
15091 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
15092
15093 	/*
15094 	 * Finally, we can release the reserve and call the END probe. We
15095 	 * disable interrupts across calling the END probe to allow us to
15096 	 * return the CPU on which we actually called the END probe. This
15097 	 * allows user-land to be sure that this CPU's principal buffer is
15098 	 * processed last.
15099 	 */
15100 	state->dts_reserve = 0;
15101
15102 	cookie = dtrace_interrupt_disable();
15103 	*cpu = curcpu;
15104 	dtrace_probe(dtrace_probeid_end,
15105 	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
15106 	dtrace_interrupt_enable(cookie);
15107
15108 	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
15109 	dtrace_sync();
15110
15111 #ifdef illumos
15112 	if (state->dts_getf != 0 &&
15113 	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
15114 		/*
15115 		 * We don't have kernel privs but we have at least one call
15116 		 * to getf(); we need to lower our zone's count, and (if
15117 		 * this is the last enabling to have an unprivileged call
15118 		 * to getf()) we need to clear the closef() hook.
15119 		 */
15120 		ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
15121 		ASSERT(dtrace_closef == dtrace_getf_barrier);
15122 		ASSERT(dtrace_getf > 0);
15123
15124 		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
15125
15126 		if (--dtrace_getf == 0)
15127 			dtrace_closef = NULL;
15128 	}
15129 #endif
15130
15131 	return (0);
15132 }
15133
15134 static int
15135 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
15136     dtrace_optval_t val)
15137 {
15138 	ASSERT(MUTEX_HELD(&dtrace_lock));
15139
15140 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
15141 		return (EBUSY);
15142
15143 	if (option >= DTRACEOPT_MAX)
15144 		return (EINVAL);
15145
15146 	if (option != DTRACEOPT_CPU && val < 0)
15147 		return (EINVAL);
15148
15149 	switch (option) {
15150 	case DTRACEOPT_DESTRUCTIVE:
15151 		if (dtrace_destructive_disallow)
15152 			return (EACCES);
15153
15154 		state->dts_cred.dcr_destructive = 1;
15155 		break;
15156
15157 	case DTRACEOPT_BUFSIZE:
15158 	case DTRACEOPT_DYNVARSIZE:
15159 	case DTRACEOPT_AGGSIZE:
15160 	case DTRACEOPT_SPECSIZE:
15161 	case DTRACEOPT_STRSIZE:
15162 		if (val < 0)
15163 			return (EINVAL);
15164
15165 		if (val >= LONG_MAX) {
15166 			/*
15167 			 * If this is an otherwise negative value, set it to
15168 			 * the highest multiple of 128m less than LONG_MAX.
15169 			 * Technically, we're adjusting the size without
15170 			 * regard to the buffer resizing policy, but in fact,
15171 			 * this has no effect -- if we set the buffer size to
15172 			 * ~LONG_MAX and the buffer policy is ultimately set to
15173 			 * be "manual", the buffer allocation is guaranteed to
15174 			 * fail, if only because the allocation requires two
15175 			 * buffers. (We set the size to the highest
15176 			 * multiple of 128m because it ensures that the size
15177 			 * will remain a multiple of a megabyte when
15178 			 * repeatedly halved -- all the way down to 15m.)
15179 			 */
15180 			val = LONG_MAX - (1 << 27) + 1;
15181 		}
15182 	}
15183
15184 	state->dts_options[option] = val;
15185
15186 	return (0);
15187 }
15188
15189 static void
15190 dtrace_state_destroy(dtrace_state_t *state)
15191 {
15192 	dtrace_ecb_t *ecb;
15193 	dtrace_vstate_t *vstate = &state->dts_vstate;
15194 #ifdef illumos
15195 	minor_t minor = getminor(state->dts_dev);
15196 #endif
15197 	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
15198 	dtrace_speculation_t *spec = state->dts_speculations;
15199 	int nspec = state->dts_nspeculations;
15200 	uint32_t match;
15201
15202 	ASSERT(MUTEX_HELD(&dtrace_lock));
15203 	ASSERT(MUTEX_HELD(&cpu_lock));
15204
15205 	/*
15206 	 * First, retract any retained enablings for this state.
15207 */ 15208 dtrace_enabling_retract(state); 15209 ASSERT(state->dts_nretained == 0); 15210 15211 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 15212 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 15213 /* 15214 * We have managed to come into dtrace_state_destroy() on a 15215 * hot enabling -- almost certainly because of a disorderly 15216 * shutdown of a consumer. (That is, a consumer that is 15217 * exiting without having called dtrace_stop().) In this case, 15218 * we're going to set our activity to be KILLED, and then 15219 * issue a sync to be sure that everyone is out of probe 15220 * context before we start blowing away ECBs. 15221 */ 15222 state->dts_activity = DTRACE_ACTIVITY_KILLED; 15223 dtrace_sync(); 15224 } 15225 15226 /* 15227 * Release the credential hold we took in dtrace_state_create(). 15228 */ 15229 if (state->dts_cred.dcr_cred != NULL) 15230 crfree(state->dts_cred.dcr_cred); 15231 15232 /* 15233 * Now we can safely disable and destroy any enabled probes. Because 15234 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 15235 * (especially if they're all enabled), we take two passes through the 15236 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 15237 * in the second we disable whatever is left over. 15238 */ 15239 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 15240 for (i = 0; i < state->dts_necbs; i++) { 15241 if ((ecb = state->dts_ecbs[i]) == NULL) 15242 continue; 15243 15244 if (match && ecb->dte_probe != NULL) { 15245 dtrace_probe_t *probe = ecb->dte_probe; 15246 dtrace_provider_t *prov = probe->dtpr_provider; 15247 15248 if (!(prov->dtpv_priv.dtpp_flags & match)) 15249 continue; 15250 } 15251 15252 dtrace_ecb_disable(ecb); 15253 dtrace_ecb_destroy(ecb); 15254 } 15255 15256 if (!match) 15257 break; 15258 } 15259 15260 /* 15261 * Before we free the buffers, perform one more sync to assure that 15262 * every CPU is out of probe context. 
15263 */ 15264 dtrace_sync(); 15265 15266 dtrace_buffer_free(state->dts_buffer); 15267 dtrace_buffer_free(state->dts_aggbuffer); 15268 15269 for (i = 0; i < nspec; i++) 15270 dtrace_buffer_free(spec[i].dtsp_buffer); 15271 15272 #ifdef illumos 15273 if (state->dts_cleaner != CYCLIC_NONE) 15274 cyclic_remove(state->dts_cleaner); 15275 15276 if (state->dts_deadman != CYCLIC_NONE) 15277 cyclic_remove(state->dts_deadman); 15278 #else 15279 callout_stop(&state->dts_cleaner); 15280 callout_drain(&state->dts_cleaner); 15281 callout_stop(&state->dts_deadman); 15282 callout_drain(&state->dts_deadman); 15283 #endif 15284 15285 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15286 dtrace_vstate_fini(vstate); 15287 if (state->dts_ecbs != NULL) 15288 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15289 15290 if (state->dts_aggregations != NULL) { 15291 #ifdef DEBUG 15292 for (i = 0; i < state->dts_naggregations; i++) 15293 ASSERT(state->dts_aggregations[i] == NULL); 15294 #endif 15295 ASSERT(state->dts_naggregations > 0); 15296 kmem_free(state->dts_aggregations, 15297 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15298 } 15299 15300 kmem_free(state->dts_buffer, bufsize); 15301 kmem_free(state->dts_aggbuffer, bufsize); 15302 15303 for (i = 0; i < nspec; i++) 15304 kmem_free(spec[i].dtsp_buffer, bufsize); 15305 15306 if (spec != NULL) 15307 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15308 15309 dtrace_format_destroy(state); 15310 15311 if (state->dts_aggid_arena != NULL) { 15312 #ifdef illumos 15313 vmem_destroy(state->dts_aggid_arena); 15314 #else 15315 delete_unrhdr(state->dts_aggid_arena); 15316 #endif 15317 state->dts_aggid_arena = NULL; 15318 } 15319 #ifdef illumos 15320 ddi_soft_state_free(dtrace_softstate, minor); 15321 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15322 #endif 15323 } 15324 15325 /* 15326 * DTrace Anonymous Enabling Functions 15327 */ 15328 static dtrace_state_t * 15329 dtrace_anon_grab(void) 15330 { 15331 dtrace_state_t *state; 15332 15333 ASSERT(MUTEX_HELD(&dtrace_lock)); 15334 15335 if ((state = dtrace_anon.dta_state) == NULL) { 15336 ASSERT(dtrace_anon.dta_enabling == NULL); 15337 return (NULL); 15338 } 15339 15340 ASSERT(dtrace_anon.dta_enabling != NULL); 15341 ASSERT(dtrace_retained != NULL); 15342 15343 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15344 dtrace_anon.dta_enabling = NULL; 15345 dtrace_anon.dta_state = NULL; 15346 15347 return (state); 15348 } 15349 15350 static void 15351 dtrace_anon_property(void) 15352 { 15353 int i, rv; 15354 dtrace_state_t *state; 15355 dof_hdr_t *dof; 15356 char c[32]; /* enough for "dof-data-" + digits */ 15357 15358 ASSERT(MUTEX_HELD(&dtrace_lock)); 15359 ASSERT(MUTEX_HELD(&cpu_lock)); 15360 15361 for (i = 0; ; i++) { 15362 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15363 15364 dtrace_err_verbose = 1; 15365 15366 if ((dof = dtrace_dof_property(c)) == NULL) { 15367 dtrace_err_verbose = 0; 15368 break; 15369 } 15370 15371 #ifdef illumos 15372 /* 15373 * We want to create anonymous state, so we need to transition 15374 * the kernel debugger to indicate that DTrace is active. If 15375 * this fails (e.g. because the debugger has modified text in 15376 * some way), we won't continue with the processing. 
15377 */ 15378 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15379 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15380 "enabling ignored."); 15381 dtrace_dof_destroy(dof); 15382 break; 15383 } 15384 #endif 15385 15386 /* 15387 * If we haven't allocated an anonymous state, we'll do so now. 15388 */ 15389 if ((state = dtrace_anon.dta_state) == NULL) { 15390 state = dtrace_state_create(NULL, NULL); 15391 dtrace_anon.dta_state = state; 15392 15393 if (state == NULL) { 15394 /* 15395 * This basically shouldn't happen: the only 15396 * failure mode from dtrace_state_create() is a 15397 * failure of ddi_soft_state_zalloc() that 15398 * itself should never happen. Still, the 15399 * interface allows for a failure mode, and 15400 * we want to fail as gracefully as possible: 15401 * we'll emit an error message and cease 15402 * processing anonymous state in this case. 15403 */ 15404 cmn_err(CE_WARN, "failed to create " 15405 "anonymous state"); 15406 dtrace_dof_destroy(dof); 15407 break; 15408 } 15409 } 15410 15411 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15412 &dtrace_anon.dta_enabling, 0, B_TRUE); 15413 15414 if (rv == 0) 15415 rv = dtrace_dof_options(dof, state); 15416 15417 dtrace_err_verbose = 0; 15418 dtrace_dof_destroy(dof); 15419 15420 if (rv != 0) { 15421 /* 15422 * This is malformed DOF; chuck any anonymous state 15423 * that we created. 15424 */ 15425 ASSERT(dtrace_anon.dta_enabling == NULL); 15426 dtrace_state_destroy(state); 15427 dtrace_anon.dta_state = NULL; 15428 break; 15429 } 15430 15431 ASSERT(dtrace_anon.dta_enabling != NULL); 15432 } 15433 15434 if (dtrace_anon.dta_enabling != NULL) { 15435 int rval; 15436 15437 /* 15438 * dtrace_enabling_retain() can only fail because we are 15439 * trying to retain more enablings than are allowed -- but 15440 * we only have one anonymous enabling, and we are guaranteed 15441 * to be allowed at least one retained enabling; we assert 15442 * that dtrace_enabling_retain() returns success. 15443 */ 15444 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15445 ASSERT(rval == 0); 15446 15447 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15448 } 15449 } 15450 15451 /* 15452 * DTrace Helper Functions 15453 */ 15454 static void 15455 dtrace_helper_trace(dtrace_helper_action_t *helper, 15456 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15457 { 15458 uint32_t size, next, nnext, i; 15459 dtrace_helptrace_t *ent, *buffer; 15460 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15461 15462 if ((buffer = dtrace_helptrace_buffer) == NULL) 15463 return; 15464 15465 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15466 15467 /* 15468 * What would a tracing framework be without its own tracing 15469 * framework? (Well, a hell of a lot simpler, for starters...) 15470 */ 15471 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15472 sizeof (uint64_t) - sizeof (uint64_t); 15473 15474 /* 15475 * Iterate until we can allocate a slot in the trace buffer. 15476 */ 15477 do { 15478 next = dtrace_helptrace_next; 15479 15480 if (next + size < dtrace_helptrace_bufsize) { 15481 nnext = next + size; 15482 } else { 15483 nnext = size; 15484 } 15485 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15486 15487 /* 15488 * We have our slot; fill it in. 
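 *
 * (The do/while above is a lock-free bump allocator over a circular
 * buffer: each caller proposes next + size as the new high-water
 * mark, wrapping to the buffer's start when the entry would not fit,
 * and dtrace_cas32() publishes whichever proposal wins; losers simply
 * reload dtrace_helptrace_next and retry.  A wrap is detectable here
 * because nnext == size only when this entry was placed at offset
 * zero.)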
15489 */ 15490 if (nnext == size) { 15491 dtrace_helptrace_wrapped++; 15492 next = 0; 15493 } 15494 15495 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); 15496 ent->dtht_helper = helper; 15497 ent->dtht_where = where; 15498 ent->dtht_nlocals = vstate->dtvs_nlocals; 15499 15500 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15501 mstate->dtms_fltoffs : -1; 15502 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15503 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15504 15505 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15506 dtrace_statvar_t *svar; 15507 15508 if ((svar = vstate->dtvs_locals[i]) == NULL) 15509 continue; 15510 15511 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15512 ent->dtht_locals[i] = 15513 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15514 } 15515 } 15516 15517 static uint64_t 15518 dtrace_helper(int which, dtrace_mstate_t *mstate, 15519 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15520 { 15521 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15522 uint64_t sarg0 = mstate->dtms_arg[0]; 15523 uint64_t sarg1 = mstate->dtms_arg[1]; 15524 uint64_t rval = 0; 15525 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15526 dtrace_helper_action_t *helper; 15527 dtrace_vstate_t *vstate; 15528 dtrace_difo_t *pred; 15529 int i, trace = dtrace_helptrace_buffer != NULL; 15530 15531 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15532 15533 if (helpers == NULL) 15534 return (0); 15535 15536 if ((helper = helpers->dthps_actions[which]) == NULL) 15537 return (0); 15538 15539 vstate = &helpers->dthps_vstate; 15540 mstate->dtms_arg[0] = arg0; 15541 mstate->dtms_arg[1] = arg1; 15542 15543 /* 15544 * Now iterate over each helper. If its predicate evaluates to 'true', 15545 * we'll call the corresponding actions. Note that the below calls 15546 * to dtrace_dif_emulate() may set faults in machine state. This is 15547 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15548 * the stored DIF offset with its own (which is the desired behavior). 15549 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15550 * from machine state; this is okay, too. 15551 */ 15552 for (; helper != NULL; helper = helper->dtha_next) { 15553 if ((pred = helper->dtha_predicate) != NULL) { 15554 if (trace) 15555 dtrace_helper_trace(helper, mstate, vstate, 0); 15556 15557 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15558 goto next; 15559 15560 if (*flags & CPU_DTRACE_FAULT) 15561 goto err; 15562 } 15563 15564 for (i = 0; i < helper->dtha_nactions; i++) { 15565 if (trace) 15566 dtrace_helper_trace(helper, 15567 mstate, vstate, i + 1); 15568 15569 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15570 mstate, vstate, state); 15571 15572 if (*flags & CPU_DTRACE_FAULT) 15573 goto err; 15574 } 15575 15576 next: 15577 if (trace) 15578 dtrace_helper_trace(helper, mstate, vstate, 15579 DTRACE_HELPTRACE_NEXT); 15580 } 15581 15582 if (trace) 15583 dtrace_helper_trace(helper, mstate, vstate, 15584 DTRACE_HELPTRACE_DONE); 15585 15586 /* 15587 * Restore the arg0 that we saved upon entry. 15588 */ 15589 mstate->dtms_arg[0] = sarg0; 15590 mstate->dtms_arg[1] = sarg1; 15591 15592 return (rval); 15593 15594 err: 15595 if (trace) 15596 dtrace_helper_trace(helper, mstate, vstate, 15597 DTRACE_HELPTRACE_ERR); 15598 15599 /* 15600 * Restore the arg0 that we saved upon entry. 
15601 	 */
15602 	mstate->dtms_arg[0] = sarg0;
15603 	mstate->dtms_arg[1] = sarg1;
15604
15605 	return (0);
15606 }
15607
15608 static void
15609 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15610     dtrace_vstate_t *vstate)
15611 {
15612 	int i;
15613
15614 	if (helper->dtha_predicate != NULL)
15615 		dtrace_difo_release(helper->dtha_predicate, vstate);
15616
15617 	for (i = 0; i < helper->dtha_nactions; i++) {
15618 		ASSERT(helper->dtha_actions[i] != NULL);
15619 		dtrace_difo_release(helper->dtha_actions[i], vstate);
15620 	}
15621
15622 	kmem_free(helper->dtha_actions,
15623 	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
15624 	kmem_free(helper, sizeof (dtrace_helper_action_t));
15625 }
15626
15627 static int
15628 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen)
15629 {
15630 	proc_t *p = curproc;
15631 	dtrace_vstate_t *vstate;
15632 	int i;
15633
15634 	if (help == NULL)
15635 		help = p->p_dtrace_helpers;
15636
15637 	ASSERT(MUTEX_HELD(&dtrace_lock));
15638
15639 	if (help == NULL || gen > help->dthps_generation)
15640 		return (EINVAL);
15641
15642 	vstate = &help->dthps_vstate;
15643
15644 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15645 		dtrace_helper_action_t *last = NULL, *h, *next;
15646
15647 		for (h = help->dthps_actions[i]; h != NULL; h = next) {
15648 			next = h->dtha_next;
15649
15650 			if (h->dtha_generation == gen) {
15651 				if (last != NULL) {
15652 					last->dtha_next = next;
15653 				} else {
15654 					help->dthps_actions[i] = next;
15655 				}
15656
15657 				dtrace_helper_action_destroy(h, vstate);
15658 			} else {
15659 				last = h;
15660 			}
15661 		}
15662 	}
15663
15664 	/*
15665 	 * Iterate until we've cleared out all helper providers with the
15666 	 * given generation number.
15667 	 */
15668 	for (;;) {
15669 		dtrace_helper_provider_t *prov;
15670
15671 		/*
15672 		 * Look for a helper provider with the right generation. We
15673 		 * have to start back at the beginning of the list each time
15674 		 * because we drop dtrace_lock. It's unlikely that we'll make
15675 		 * more than two passes.
15676 		 */
15677 		for (i = 0; i < help->dthps_nprovs; i++) {
15678 			prov = help->dthps_provs[i];
15679
15680 			if (prov->dthp_generation == gen)
15681 				break;
15682 		}
15683
15684 		/*
15685 		 * If there were no matches, we're done.
15686 		 */
15687 		if (i == help->dthps_nprovs)
15688 			break;
15689
15690 		/*
15691 		 * Move the last helper provider into this slot.
15692 		 */
15693 		help->dthps_nprovs--;
15694 		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15695 		help->dthps_provs[help->dthps_nprovs] = NULL;
15696
15697 		mutex_exit(&dtrace_lock);
15698
15699 		/*
15700 		 * If we have a meta provider, remove this helper provider.
15701 		 */
15702 		mutex_enter(&dtrace_meta_lock);
15703 		if (dtrace_meta_pid != NULL) {
15704 			ASSERT(dtrace_deferred_pid == NULL);
15705 			dtrace_helper_provider_remove(&prov->dthp_prov,
15706 			    p->p_pid);
15707 		}
15708 		mutex_exit(&dtrace_meta_lock);
15709
15710 		dtrace_helper_provider_destroy(prov);
15711
15712 		mutex_enter(&dtrace_lock);
15713 	}
15714
15715 	return (0);
15716 }
15717
15718 static int
15719 dtrace_helper_validate(dtrace_helper_action_t *helper)
15720 {
15721 	int err = 0, i;
15722 	dtrace_difo_t *dp;
15723
15724 	if ((dp = helper->dtha_predicate) != NULL)
15725 		err += dtrace_difo_validate_helper(dp);
15726
15727 	for (i = 0; i < helper->dtha_nactions; i++)
15728 		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15729
15730 	return (err == 0);
15731 }
15732
15733 static int
15734 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep,
15735     dtrace_helpers_t *help)
15736 {
15737 	dtrace_helper_action_t *helper, *last;
15738 	dtrace_actdesc_t *act;
15739 	dtrace_vstate_t *vstate;
15740 	dtrace_predicate_t *pred;
15741 	int count = 0, nactions = 0, i;
15742
15743 	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15744 		return (EINVAL);
15745
15746 	last = help->dthps_actions[which];
15747 	vstate = &help->dthps_vstate;
15748
15749 	for (count = 0; last != NULL; last = last->dtha_next) {
15750 		count++;
15751 		if (last->dtha_next == NULL)
15752 			break;
15753 	}
15754
15755 	/*
15756 	 * If we already have dtrace_helper_actions_max helper actions for this
15757 	 * helper action type, we'll refuse to add a new one.
15758 	 */
15759 	if (count >= dtrace_helper_actions_max)
15760 		return (ENOSPC);
15761
15762 	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15763 	helper->dtha_generation = help->dthps_generation;
15764
15765 	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15766 		ASSERT(pred->dtp_difo != NULL);
15767 		dtrace_difo_hold(pred->dtp_difo);
15768 		helper->dtha_predicate = pred->dtp_difo;
15769 	}
15770
15771 	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15772 		if (act->dtad_kind != DTRACEACT_DIFEXPR)
15773 			goto err;
15774
15775 		if (act->dtad_difo == NULL)
15776 			goto err;
15777
15778 		nactions++;
15779 	}
15780
15781 	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15782 	    (helper->dtha_nactions = nactions), KM_SLEEP);
15783
15784 	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15785 		dtrace_difo_hold(act->dtad_difo);
15786 		helper->dtha_actions[i++] = act->dtad_difo;
15787 	}
15788
15789 	if (!dtrace_helper_validate(helper))
15790 		goto err;
15791
15792 	if (last == NULL) {
15793 		help->dthps_actions[which] = helper;
15794 	} else {
15795 		last->dtha_next = helper;
15796 	}
15797
15798 	if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15799 		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15800 		dtrace_helptrace_next = 0;
15801 	}
15802
15803 	return (0);
15804 err:
15805 	dtrace_helper_action_destroy(helper, vstate);
15806 	return (EINVAL);
15807 }
15808
15809 static void
15810 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15811     dof_helper_t *dofhp)
15812 {
15813 	ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
15814
15815 	mutex_enter(&dtrace_meta_lock);
15816 	mutex_enter(&dtrace_lock);
15817
15818 	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15819 		/*
15820 		 * If the dtrace module is loaded but not attached, or if
15821 		 * there isn't a meta provider registered to deal with
15822 		 * these provider descriptions, we need to postpone creating
15823 		 * the actual providers until later.
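 *
 * The deferral below is simply a push onto the head of the
 * global dtrace_deferred_pid list; once the framework attaches
 * and a meta provider registers, that list is walked and each
 * deferred helper provider is handed to the meta provider then.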
15824 		 */
15825
15826 		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15827 		    dtrace_deferred_pid != help) {
15828 			help->dthps_deferred = 1;
15829 			help->dthps_pid = p->p_pid;
15830 			help->dthps_next = dtrace_deferred_pid;
15831 			help->dthps_prev = NULL;
15832 			if (dtrace_deferred_pid != NULL)
15833 				dtrace_deferred_pid->dthps_prev = help;
15834 			dtrace_deferred_pid = help;
15835 		}
15836
15837 		mutex_exit(&dtrace_lock);
15838
15839 	} else if (dofhp != NULL) {
15840 		/*
15841 		 * If the dtrace module is loaded and we have a particular
15842 		 * helper provider description, pass that off to the
15843 		 * meta provider.
15844 		 */
15845
15846 		mutex_exit(&dtrace_lock);
15847
15848 		dtrace_helper_provide(dofhp, p->p_pid);
15849
15850 	} else {
15851 		/*
15852 		 * Otherwise, just pass all the helper provider descriptions
15853 		 * off to the meta provider.
15854 		 */
15855
15856 		int i;
15857 		mutex_exit(&dtrace_lock);
15858
15859 		for (i = 0; i < help->dthps_nprovs; i++) {
15860 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15861 			    p->p_pid);
15862 		}
15863 	}
15864
15865 	mutex_exit(&dtrace_meta_lock);
15866 }
15867
15868 static int
15869 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen)
15870 {
15871 	dtrace_helper_provider_t *hprov, **tmp_provs;
15872 	uint_t tmp_maxprovs, i;
15873
15874 	ASSERT(MUTEX_HELD(&dtrace_lock));
15875 	ASSERT(help != NULL);
15876
15877 	/*
15878 	 * If we already have dtrace_helper_providers_max helper providers,
15879 	 * we refuse to add a new one.
15880 	 */
15881 	if (help->dthps_nprovs >= dtrace_helper_providers_max)
15882 		return (ENOSPC);
15883
15884 	/*
15885 	 * Check to make sure this isn't a duplicate.
15886 	 */
15887 	for (i = 0; i < help->dthps_nprovs; i++) {
15888 		if (dofhp->dofhp_addr ==
15889 		    help->dthps_provs[i]->dthp_prov.dofhp_addr)
15890 			return (EALREADY);
15891 	}
15892
15893 	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15894 	hprov->dthp_prov = *dofhp;
15895 	hprov->dthp_ref = 1;
15896 	hprov->dthp_generation = gen;
15897
15898 	/*
15899 	 * Allocate a bigger table for helper providers if it's already full.
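 * Growth is by doubling -- dthps_maxprovs goes 0 -> 2 -> 4 -> ... --
 * clamped at dtrace_helper_providers_max; the previous table, if any,
 * is copied forward and freed.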
15900 */ 15901 if (help->dthps_maxprovs == help->dthps_nprovs) { 15902 tmp_maxprovs = help->dthps_maxprovs; 15903 tmp_provs = help->dthps_provs; 15904 15905 if (help->dthps_maxprovs == 0) 15906 help->dthps_maxprovs = 2; 15907 else 15908 help->dthps_maxprovs *= 2; 15909 if (help->dthps_maxprovs > dtrace_helper_providers_max) 15910 help->dthps_maxprovs = dtrace_helper_providers_max; 15911 15912 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 15913 15914 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 15915 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15916 15917 if (tmp_provs != NULL) { 15918 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 15919 sizeof (dtrace_helper_provider_t *)); 15920 kmem_free(tmp_provs, tmp_maxprovs * 15921 sizeof (dtrace_helper_provider_t *)); 15922 } 15923 } 15924 15925 help->dthps_provs[help->dthps_nprovs] = hprov; 15926 help->dthps_nprovs++; 15927 15928 return (0); 15929 } 15930 15931 static void 15932 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 15933 { 15934 mutex_enter(&dtrace_lock); 15935 15936 if (--hprov->dthp_ref == 0) { 15937 dof_hdr_t *dof; 15938 mutex_exit(&dtrace_lock); 15939 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 15940 dtrace_dof_destroy(dof); 15941 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 15942 } else { 15943 mutex_exit(&dtrace_lock); 15944 } 15945 } 15946 15947 static int 15948 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 15949 { 15950 uintptr_t daddr = (uintptr_t)dof; 15951 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 15952 dof_provider_t *provider; 15953 dof_probe_t *probe; 15954 uint8_t *arg; 15955 char *strtab, *typestr; 15956 dof_stridx_t typeidx; 15957 size_t typesz; 15958 uint_t nprobes, j, k; 15959 15960 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 15961 15962 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 15963 dtrace_dof_error(dof, "misaligned section offset"); 15964 return (-1); 15965 } 15966 15967 /* 15968 * The section needs to be large enough to contain the DOF provider 15969 * structure appropriate for the given version. 15970 */ 15971 if (sec->dofs_size < 15972 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
15973 offsetof(dof_provider_t, dofpv_prenoffs) : 15974 sizeof (dof_provider_t))) { 15975 dtrace_dof_error(dof, "provider section too small"); 15976 return (-1); 15977 } 15978 15979 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 15980 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 15981 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 15982 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 15983 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 15984 15985 if (str_sec == NULL || prb_sec == NULL || 15986 arg_sec == NULL || off_sec == NULL) 15987 return (-1); 15988 15989 enoff_sec = NULL; 15990 15991 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 15992 provider->dofpv_prenoffs != DOF_SECT_NONE && 15993 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 15994 provider->dofpv_prenoffs)) == NULL) 15995 return (-1); 15996 15997 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 15998 15999 if (provider->dofpv_name >= str_sec->dofs_size || 16000 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 16001 dtrace_dof_error(dof, "invalid provider name"); 16002 return (-1); 16003 } 16004 16005 if (prb_sec->dofs_entsize == 0 || 16006 prb_sec->dofs_entsize > prb_sec->dofs_size) { 16007 dtrace_dof_error(dof, "invalid entry size"); 16008 return (-1); 16009 } 16010 16011 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 16012 dtrace_dof_error(dof, "misaligned entry size"); 16013 return (-1); 16014 } 16015 16016 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 16017 dtrace_dof_error(dof, "invalid entry size"); 16018 return (-1); 16019 } 16020 16021 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 16022 dtrace_dof_error(dof, "misaligned section offset"); 16023 return (-1); 16024 } 16025 16026 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 16027 dtrace_dof_error(dof, "invalid entry size"); 16028 return (-1); 16029 } 16030 16031 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 16032 16033 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 16034 16035 /* 16036 * Take a pass through the probes to check for errors. 16037 */ 16038 for (j = 0; j < nprobes; j++) { 16039 probe = (dof_probe_t *)(uintptr_t)(daddr + 16040 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 16041 16042 if (probe->dofpr_func >= str_sec->dofs_size) { 16043 dtrace_dof_error(dof, "invalid function name"); 16044 return (-1); 16045 } 16046 16047 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 16048 dtrace_dof_error(dof, "function name too long"); 16049 /* 16050 * Keep going if the function name is too long. 16051 * Unlike provider and probe names, we cannot reasonably 16052 * impose restrictions on function names, since they're 16053 * a property of the code being instrumented. We will 16054 * skip this probe in dtrace_helper_provide_one(). 16055 */ 16056 } 16057 16058 if (probe->dofpr_name >= str_sec->dofs_size || 16059 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 16060 dtrace_dof_error(dof, "invalid probe name"); 16061 return (-1); 16062 } 16063 16064 /* 16065 * The offset count must not wrap the index, and the offsets 16066 * must also not overflow the section's data. 
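 *
 * (The first clause below is the usual unsigned-overflow idiom: for
 * unsigned a and b, a + b < a exactly when the addition wrapped.
 * The second clause then bounds the scaled extent
 * (dofpr_offidx + dofpr_noffs) * dofs_entsize by dofs_size.)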
16067 */ 16068 if (probe->dofpr_offidx + probe->dofpr_noffs < 16069 probe->dofpr_offidx || 16070 (probe->dofpr_offidx + probe->dofpr_noffs) * 16071 off_sec->dofs_entsize > off_sec->dofs_size) { 16072 dtrace_dof_error(dof, "invalid probe offset"); 16073 return (-1); 16074 } 16075 16076 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 16077 /* 16078 * If there's no is-enabled offset section, make sure 16079 * there aren't any is-enabled offsets. Otherwise 16080 * perform the same checks as for probe offsets 16081 * (immediately above). 16082 */ 16083 if (enoff_sec == NULL) { 16084 if (probe->dofpr_enoffidx != 0 || 16085 probe->dofpr_nenoffs != 0) { 16086 dtrace_dof_error(dof, "is-enabled " 16087 "offsets with null section"); 16088 return (-1); 16089 } 16090 } else if (probe->dofpr_enoffidx + 16091 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 16092 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 16093 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 16094 dtrace_dof_error(dof, "invalid is-enabled " 16095 "offset"); 16096 return (-1); 16097 } 16098 16099 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 16100 dtrace_dof_error(dof, "zero probe and " 16101 "is-enabled offsets"); 16102 return (-1); 16103 } 16104 } else if (probe->dofpr_noffs == 0) { 16105 dtrace_dof_error(dof, "zero probe offsets"); 16106 return (-1); 16107 } 16108 16109 if (probe->dofpr_argidx + probe->dofpr_xargc < 16110 probe->dofpr_argidx || 16111 (probe->dofpr_argidx + probe->dofpr_xargc) * 16112 arg_sec->dofs_entsize > arg_sec->dofs_size) { 16113 dtrace_dof_error(dof, "invalid args"); 16114 return (-1); 16115 } 16116 16117 typeidx = probe->dofpr_nargv; 16118 typestr = strtab + probe->dofpr_nargv; 16119 for (k = 0; k < probe->dofpr_nargc; k++) { 16120 if (typeidx >= str_sec->dofs_size) { 16121 dtrace_dof_error(dof, "bad " 16122 "native argument type"); 16123 return (-1); 16124 } 16125 16126 typesz = strlen(typestr) + 1; 16127 if (typesz > DTRACE_ARGTYPELEN) { 16128 dtrace_dof_error(dof, "native " 16129 "argument type too long"); 16130 return (-1); 16131 } 16132 typeidx += typesz; 16133 typestr += typesz; 16134 } 16135 16136 typeidx = probe->dofpr_xargv; 16137 typestr = strtab + probe->dofpr_xargv; 16138 for (k = 0; k < probe->dofpr_xargc; k++) { 16139 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 16140 dtrace_dof_error(dof, "bad " 16141 "native argument index"); 16142 return (-1); 16143 } 16144 16145 if (typeidx >= str_sec->dofs_size) { 16146 dtrace_dof_error(dof, "bad " 16147 "translated argument type"); 16148 return (-1); 16149 } 16150 16151 typesz = strlen(typestr) + 1; 16152 if (typesz > DTRACE_ARGTYPELEN) { 16153 dtrace_dof_error(dof, "translated argument " 16154 "type too long"); 16155 return (-1); 16156 } 16157 16158 typeidx += typesz; 16159 typestr += typesz; 16160 } 16161 } 16162 16163 return (0); 16164 } 16165 16166 static int 16167 #ifdef __FreeBSD__ 16168 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p) 16169 #else 16170 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 16171 #endif 16172 { 16173 dtrace_helpers_t *help; 16174 dtrace_vstate_t *vstate; 16175 dtrace_enabling_t *enab = NULL; 16176 #ifndef __FreeBSD__ 16177 proc_t *p = curproc; 16178 #endif 16179 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 16180 uintptr_t daddr = (uintptr_t)dof; 16181 16182 ASSERT(MUTEX_HELD(&dtrace_lock)); 16183 16184 if ((help = p->p_dtrace_helpers) == NULL) 16185 help = dtrace_helpers_create(p); 16186 16187 vstate = &help->dthps_vstate; 16188 16189 if ((rv = 
dtrace_dof_slurp(dof, vstate, NULL, &enab, 16190 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 16191 dtrace_dof_destroy(dof); 16192 return (rv); 16193 } 16194 16195 /* 16196 * Look for helper providers and validate their descriptions. 16197 */ 16198 if (dhp != NULL) { 16199 for (i = 0; i < dof->dofh_secnum; i++) { 16200 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 16201 dof->dofh_secoff + i * dof->dofh_secsize); 16202 16203 if (sec->dofs_type != DOF_SECT_PROVIDER) 16204 continue; 16205 16206 if (dtrace_helper_provider_validate(dof, sec) != 0) { 16207 dtrace_enabling_destroy(enab); 16208 dtrace_dof_destroy(dof); 16209 return (-1); 16210 } 16211 16212 nprovs++; 16213 } 16214 } 16215 16216 /* 16217 * Now we need to walk through the ECB descriptions in the enabling. 16218 */ 16219 for (i = 0; i < enab->dten_ndesc; i++) { 16220 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 16221 dtrace_probedesc_t *desc = &ep->dted_probe; 16222 16223 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 16224 continue; 16225 16226 if (strcmp(desc->dtpd_mod, "helper") != 0) 16227 continue; 16228 16229 if (strcmp(desc->dtpd_func, "ustack") != 0) 16230 continue; 16231 16232 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 16233 ep, help)) != 0) { 16234 /* 16235 * Adding this helper action failed -- we are now going 16236 * to rip out the entire generation and return failure. 16237 */ 16238 (void) dtrace_helper_destroygen(help, 16239 help->dthps_generation); 16240 dtrace_enabling_destroy(enab); 16241 dtrace_dof_destroy(dof); 16242 return (-1); 16243 } 16244 16245 nhelpers++; 16246 } 16247 16248 if (nhelpers < enab->dten_ndesc) 16249 dtrace_dof_error(dof, "unmatched helpers"); 16250 16251 gen = help->dthps_generation++; 16252 dtrace_enabling_destroy(enab); 16253 16254 if (dhp != NULL && nprovs > 0) { 16255 /* 16256 * Now that this is in-kernel, we change the sense of the 16257 * members: dofhp_dof denotes the in-kernel copy of the DOF 16258 * and dofhp_addr denotes the address at user-level. 16259 */ 16260 dhp->dofhp_addr = dhp->dofhp_dof; 16261 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 16262 16263 if (dtrace_helper_provider_add(dhp, help, gen) == 0) { 16264 mutex_exit(&dtrace_lock); 16265 dtrace_helper_provider_register(p, help, dhp); 16266 mutex_enter(&dtrace_lock); 16267 16268 destroy = 0; 16269 } 16270 } 16271 16272 if (destroy) 16273 dtrace_dof_destroy(dof); 16274 16275 return (gen); 16276 } 16277 16278 static dtrace_helpers_t * 16279 dtrace_helpers_create(proc_t *p) 16280 { 16281 dtrace_helpers_t *help; 16282 16283 ASSERT(MUTEX_HELD(&dtrace_lock)); 16284 ASSERT(p->p_dtrace_helpers == NULL); 16285 16286 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 16287 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 16288 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 16289 16290 p->p_dtrace_helpers = help; 16291 dtrace_helpers++; 16292 16293 return (help); 16294 } 16295 16296 #ifdef illumos 16297 static 16298 #endif 16299 void 16300 dtrace_helpers_destroy(proc_t *p) 16301 { 16302 dtrace_helpers_t *help; 16303 dtrace_vstate_t *vstate; 16304 #ifdef illumos 16305 proc_t *p = curproc; 16306 #endif 16307 int i; 16308 16309 mutex_enter(&dtrace_lock); 16310 16311 ASSERT(p->p_dtrace_helpers != NULL); 16312 ASSERT(dtrace_helpers > 0); 16313 16314 help = p->p_dtrace_helpers; 16315 vstate = &help->dthps_vstate; 16316 16317 /* 16318 * We're now going to lose the help from this process. 
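 * Clearing p_dtrace_helpers and then calling dtrace_sync() guarantees
 * that no CPU is still executing one of this process's helpers from
 * probe context by the time we begin tearing the helpers down.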
16319 */
16320 p->p_dtrace_helpers = NULL;
16321 dtrace_sync();
16322
16323 /*
16324 * Destroy the helper actions.
16325 */
16326 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16327 dtrace_helper_action_t *h, *next;
16328
16329 for (h = help->dthps_actions[i]; h != NULL; h = next) {
16330 next = h->dtha_next;
16331 dtrace_helper_action_destroy(h, vstate);
16332 h = next;
16333 }
16334 }
16335
16336 mutex_exit(&dtrace_lock);
16337
16338 /*
16339 * Destroy the helper providers.
16340 */
16341 if (help->dthps_maxprovs > 0) {
16342 mutex_enter(&dtrace_meta_lock);
16343 if (dtrace_meta_pid != NULL) {
16344 ASSERT(dtrace_deferred_pid == NULL);
16345
16346 for (i = 0; i < help->dthps_nprovs; i++) {
16347 dtrace_helper_provider_remove(
16348 &help->dthps_provs[i]->dthp_prov, p->p_pid);
16349 }
16350 } else {
16351 mutex_enter(&dtrace_lock);
16352 ASSERT(help->dthps_deferred == 0 ||
16353 help->dthps_next != NULL ||
16354 help->dthps_prev != NULL ||
16355 help == dtrace_deferred_pid);
16356
16357 /*
16358 * Remove the helper from the deferred list.
16359 */
16360 if (help->dthps_next != NULL)
16361 help->dthps_next->dthps_prev = help->dthps_prev;
16362 if (help->dthps_prev != NULL)
16363 help->dthps_prev->dthps_next = help->dthps_next;
16364 if (dtrace_deferred_pid == help) {
16365 dtrace_deferred_pid = help->dthps_next;
16366 ASSERT(help->dthps_prev == NULL);
16367 }
16368
16369 mutex_exit(&dtrace_lock);
16370 }
16371
16372 mutex_exit(&dtrace_meta_lock);
16373
16374 for (i = 0; i < help->dthps_nprovs; i++) {
16375 dtrace_helper_provider_destroy(help->dthps_provs[i]);
16376 }
16377
16378 kmem_free(help->dthps_provs, help->dthps_maxprovs *
16379 sizeof (dtrace_helper_provider_t *));
16380 }
16381
16382 mutex_enter(&dtrace_lock);
16383
16384 dtrace_vstate_fini(&help->dthps_vstate);
16385 kmem_free(help->dthps_actions,
16386 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16387 kmem_free(help, sizeof (dtrace_helpers_t));
16388
16389 --dtrace_helpers;
16390 mutex_exit(&dtrace_lock);
16391 }
16392
16393 #ifdef illumos
16394 static
16395 #endif
16396 void
16397 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
16398 {
16399 dtrace_helpers_t *help, *newhelp;
16400 dtrace_helper_action_t *helper, *new, *last;
16401 dtrace_difo_t *dp;
16402 dtrace_vstate_t *vstate;
16403 int i, j, sz, hasprovs = 0;
16404
16405 mutex_enter(&dtrace_lock);
16406 ASSERT(from->p_dtrace_helpers != NULL);
16407 ASSERT(dtrace_helpers > 0);
16408
16409 help = from->p_dtrace_helpers;
16410 newhelp = dtrace_helpers_create(to);
16411 ASSERT(to->p_dtrace_helpers != NULL);
16412
16413 newhelp->dthps_generation = help->dthps_generation;
16414 vstate = &newhelp->dthps_vstate;
16415
16416 /*
16417 * Duplicate the helper actions.
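 * Each action's predicate and action DIFOs are deep-copied via
 * dtrace_difo_duplicate(), so the new process ends up with helper
 * state that is fully independent of its parent's.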
16418 */
16419 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16420 if ((helper = help->dthps_actions[i]) == NULL)
16421 continue;
16422
16423 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16424 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16425 KM_SLEEP);
16426 new->dtha_generation = helper->dtha_generation;
16427
16428 if ((dp = helper->dtha_predicate) != NULL) {
16429 dp = dtrace_difo_duplicate(dp, vstate);
16430 new->dtha_predicate = dp;
16431 }
16432
16433 new->dtha_nactions = helper->dtha_nactions;
16434 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
16435 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
16436
16437 for (j = 0; j < new->dtha_nactions; j++) {
16438 dtrace_difo_t *dp = helper->dtha_actions[j];
16439
16440 ASSERT(dp != NULL);
16441 dp = dtrace_difo_duplicate(dp, vstate);
16442 new->dtha_actions[j] = dp;
16443 }
16444
16445 if (last != NULL) {
16446 last->dtha_next = new;
16447 } else {
16448 newhelp->dthps_actions[i] = new;
16449 }
16450
16451 last = new;
16452 }
16453 }
16454
16455 /*
16456 * Duplicate the helper providers and register them with the
16457 * DTrace framework.
16458 */
16459 if (help->dthps_nprovs > 0) {
16460 newhelp->dthps_nprovs = help->dthps_nprovs;
16461 newhelp->dthps_maxprovs = help->dthps_nprovs;
16462 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16463 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16464 for (i = 0; i < newhelp->dthps_nprovs; i++) {
16465 newhelp->dthps_provs[i] = help->dthps_provs[i];
16466 newhelp->dthps_provs[i]->dthp_ref++;
16467 }
16468
16469 hasprovs = 1;
16470 }
16471
16472 mutex_exit(&dtrace_lock);
16473
16474 if (hasprovs)
16475 dtrace_helper_provider_register(to, newhelp, NULL);
16476 }
16477
16478 /*
16479 * DTrace Hook Functions
16480 */
16481 static void
16482 dtrace_module_loaded(modctl_t *ctl)
16483 {
16484 dtrace_provider_t *prv;
16485
16486 mutex_enter(&dtrace_provider_lock);
16487 #ifdef illumos
16488 mutex_enter(&mod_lock);
16489 #endif
16490
16491 #ifdef illumos
16492 ASSERT(ctl->mod_busy);
16493 #endif
16494
16495 /*
16496 * We're going to call each provider's per-module provide operation
16497 * specifying only this module.
16498 */
16499 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16500 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16501
16502 #ifdef illumos
16503 mutex_exit(&mod_lock);
16504 #endif
16505 mutex_exit(&dtrace_provider_lock);
16506
16507 /*
16508 * If we have any retained enablings, we need to match against them.
16509 * Enabling probes requires that cpu_lock be held, and we cannot hold
16510 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16511 * module. (In particular, this happens when loading scheduling
16512 * classes.) So if we have any retained enablings, we need to dispatch
16513 * our task queue to do the match for us.
16514 */
16515 mutex_enter(&dtrace_lock);
16516
16517 if (dtrace_retained == NULL) {
16518 mutex_exit(&dtrace_lock);
16519 return;
16520 }
16521
16522 (void) taskq_dispatch(dtrace_taskq,
16523 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
16524
16525 mutex_exit(&dtrace_lock);
16526
16527 /*
16528 * And now, for a little heuristic sleaze: in general, we want to
16529 * match modules as soon as they load. However, we cannot guarantee
16530 * this, because it would lead us to the lock ordering violation
16531 * outlined above.
The common case, of course, is that cpu_lock is 16532 * _not_ held -- so we delay here for a clock tick, hoping that that's 16533 * long enough for the task queue to do its work. If it's not, it's 16534 * not a serious problem -- it just means that the module that we 16535 * just loaded may not be immediately instrumentable. 16536 */ 16537 delay(1); 16538 } 16539 16540 static void 16541 #ifdef illumos 16542 dtrace_module_unloaded(modctl_t *ctl) 16543 #else 16544 dtrace_module_unloaded(modctl_t *ctl, int *error) 16545 #endif 16546 { 16547 dtrace_probe_t template, *probe, *first, *next; 16548 dtrace_provider_t *prov; 16549 #ifndef illumos 16550 char modname[DTRACE_MODNAMELEN]; 16551 size_t len; 16552 #endif 16553 16554 #ifdef illumos 16555 template.dtpr_mod = ctl->mod_modname; 16556 #else 16557 /* Handle the fact that ctl->filename may end in ".ko". */ 16558 strlcpy(modname, ctl->filename, sizeof(modname)); 16559 len = strlen(ctl->filename); 16560 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16561 modname[len - 3] = '\0'; 16562 template.dtpr_mod = modname; 16563 #endif 16564 16565 mutex_enter(&dtrace_provider_lock); 16566 #ifdef illumos 16567 mutex_enter(&mod_lock); 16568 #endif 16569 mutex_enter(&dtrace_lock); 16570 16571 #ifndef illumos 16572 if (ctl->nenabled > 0) { 16573 /* Don't allow unloads if a probe is enabled. */ 16574 mutex_exit(&dtrace_provider_lock); 16575 mutex_exit(&dtrace_lock); 16576 *error = -1; 16577 printf( 16578 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16579 return; 16580 } 16581 #endif 16582 16583 if (dtrace_bymod == NULL) { 16584 /* 16585 * The DTrace module is loaded (obviously) but not attached; 16586 * we don't have any work to do. 16587 */ 16588 mutex_exit(&dtrace_provider_lock); 16589 #ifdef illumos 16590 mutex_exit(&mod_lock); 16591 #endif 16592 mutex_exit(&dtrace_lock); 16593 return; 16594 } 16595 16596 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16597 probe != NULL; probe = probe->dtpr_nextmod) { 16598 if (probe->dtpr_ecb != NULL) { 16599 mutex_exit(&dtrace_provider_lock); 16600 #ifdef illumos 16601 mutex_exit(&mod_lock); 16602 #endif 16603 mutex_exit(&dtrace_lock); 16604 16605 /* 16606 * This shouldn't _actually_ be possible -- we're 16607 * unloading a module that has an enabled probe in it. 16608 * (It's normally up to the provider to make sure that 16609 * this can't happen.) However, because dtps_enable() 16610 * doesn't have a failure mode, there can be an 16611 * enable/unload race. Upshot: we don't want to 16612 * assert, but we're not going to disable the 16613 * probe, either. 
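 * We therefore leave the module's probes intact; if dtrace_err_verbose
 * is set, the warning below is the only indication of the race.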
16614 */ 16615 if (dtrace_err_verbose) { 16616 #ifdef illumos 16617 cmn_err(CE_WARN, "unloaded module '%s' had " 16618 "enabled probes", ctl->mod_modname); 16619 #else 16620 cmn_err(CE_WARN, "unloaded module '%s' had " 16621 "enabled probes", modname); 16622 #endif 16623 } 16624 16625 return; 16626 } 16627 } 16628 16629 probe = first; 16630 16631 for (first = NULL; probe != NULL; probe = next) { 16632 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16633 16634 dtrace_probes[probe->dtpr_id - 1] = NULL; 16635 16636 next = probe->dtpr_nextmod; 16637 dtrace_hash_remove(dtrace_bymod, probe); 16638 dtrace_hash_remove(dtrace_byfunc, probe); 16639 dtrace_hash_remove(dtrace_byname, probe); 16640 16641 if (first == NULL) { 16642 first = probe; 16643 probe->dtpr_nextmod = NULL; 16644 } else { 16645 probe->dtpr_nextmod = first; 16646 first = probe; 16647 } 16648 } 16649 16650 /* 16651 * We've removed all of the module's probes from the hash chains and 16652 * from the probe array. Now issue a dtrace_sync() to be sure that 16653 * everyone has cleared out from any probe array processing. 16654 */ 16655 dtrace_sync(); 16656 16657 for (probe = first; probe != NULL; probe = first) { 16658 first = probe->dtpr_nextmod; 16659 prov = probe->dtpr_provider; 16660 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16661 probe->dtpr_arg); 16662 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16663 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16664 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16665 #ifdef illumos 16666 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16667 #else 16668 free_unr(dtrace_arena, probe->dtpr_id); 16669 #endif 16670 kmem_free(probe, sizeof (dtrace_probe_t)); 16671 } 16672 16673 mutex_exit(&dtrace_lock); 16674 #ifdef illumos 16675 mutex_exit(&mod_lock); 16676 #endif 16677 mutex_exit(&dtrace_provider_lock); 16678 } 16679 16680 #ifndef illumos 16681 static void 16682 dtrace_kld_load(void *arg __unused, linker_file_t lf) 16683 { 16684 16685 dtrace_module_loaded(lf); 16686 } 16687 16688 static void 16689 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16690 { 16691 16692 if (*error != 0) 16693 /* We already have an error, so don't do anything. */ 16694 return; 16695 dtrace_module_unloaded(lf, error); 16696 } 16697 #endif 16698 16699 #ifdef illumos 16700 static void 16701 dtrace_suspend(void) 16702 { 16703 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16704 } 16705 16706 static void 16707 dtrace_resume(void) 16708 { 16709 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16710 } 16711 #endif 16712 16713 static int 16714 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16715 { 16716 ASSERT(MUTEX_HELD(&cpu_lock)); 16717 mutex_enter(&dtrace_lock); 16718 16719 switch (what) { 16720 case CPU_CONFIG: { 16721 dtrace_state_t *state; 16722 dtrace_optval_t *opt, rs, c; 16723 16724 /* 16725 * For now, we only allocate a new buffer for anonymous state. 16726 */ 16727 if ((state = dtrace_anon.dta_state) == NULL) 16728 break; 16729 16730 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16731 break; 16732 16733 opt = state->dts_options; 16734 c = opt[DTRACEOPT_CPU]; 16735 16736 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16737 break; 16738 16739 /* 16740 * Regardless of what the actual policy is, we're going to 16741 * temporarily set our resize policy to be manual. We're 16742 * also going to temporarily set our CPU option to denote 16743 * the newly configured CPU. 
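 * Setting DTRACEOPT_CPU restricts dtrace_state_buffers() to the newly
 * configured CPU, and the manual resize policy keeps it from falling
 * back to smaller buffer sizes on allocation failure; both options
 * are restored as soon as the call returns.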
16744 */ 16745 rs = opt[DTRACEOPT_BUFRESIZE]; 16746 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16747 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16748 16749 (void) dtrace_state_buffers(state); 16750 16751 opt[DTRACEOPT_BUFRESIZE] = rs; 16752 opt[DTRACEOPT_CPU] = c; 16753 16754 break; 16755 } 16756 16757 case CPU_UNCONFIG: 16758 /* 16759 * We don't free the buffer in the CPU_UNCONFIG case. (The 16760 * buffer will be freed when the consumer exits.) 16761 */ 16762 break; 16763 16764 default: 16765 break; 16766 } 16767 16768 mutex_exit(&dtrace_lock); 16769 return (0); 16770 } 16771 16772 #ifdef illumos 16773 static void 16774 dtrace_cpu_setup_initial(processorid_t cpu) 16775 { 16776 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16777 } 16778 #endif 16779 16780 static void 16781 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16782 { 16783 if (dtrace_toxranges >= dtrace_toxranges_max) { 16784 int osize, nsize; 16785 dtrace_toxrange_t *range; 16786 16787 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16788 16789 if (osize == 0) { 16790 ASSERT(dtrace_toxrange == NULL); 16791 ASSERT(dtrace_toxranges_max == 0); 16792 dtrace_toxranges_max = 1; 16793 } else { 16794 dtrace_toxranges_max <<= 1; 16795 } 16796 16797 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16798 range = kmem_zalloc(nsize, KM_SLEEP); 16799 16800 if (dtrace_toxrange != NULL) { 16801 ASSERT(osize != 0); 16802 bcopy(dtrace_toxrange, range, osize); 16803 kmem_free(dtrace_toxrange, osize); 16804 } 16805 16806 dtrace_toxrange = range; 16807 } 16808 16809 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16810 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16811 16812 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16813 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16814 dtrace_toxranges++; 16815 } 16816 16817 static void 16818 dtrace_getf_barrier() 16819 { 16820 #ifdef illumos 16821 /* 16822 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 16823 * that contain calls to getf(), this routine will be called on every 16824 * closef() before either the underlying vnode is released or the 16825 * file_t itself is freed. By the time we are here, it is essential 16826 * that the file_t can no longer be accessed from a call to getf() 16827 * in probe context -- that assures that a dtrace_sync() can be used 16828 * to clear out any enablings referring to the old structures. 
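 * If neither the current zone nor the global zone has any getf()
 * enablings outstanding, the check below allows us to skip the
 * (comparatively expensive) dtrace_sync() entirely.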
16829 */ 16830 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 16831 kcred->cr_zone->zone_dtrace_getf != 0) 16832 dtrace_sync(); 16833 #endif 16834 } 16835 16836 /* 16837 * DTrace Driver Cookbook Functions 16838 */ 16839 #ifdef illumos 16840 /*ARGSUSED*/ 16841 static int 16842 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 16843 { 16844 dtrace_provider_id_t id; 16845 dtrace_state_t *state = NULL; 16846 dtrace_enabling_t *enab; 16847 16848 mutex_enter(&cpu_lock); 16849 mutex_enter(&dtrace_provider_lock); 16850 mutex_enter(&dtrace_lock); 16851 16852 if (ddi_soft_state_init(&dtrace_softstate, 16853 sizeof (dtrace_state_t), 0) != 0) { 16854 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 16855 mutex_exit(&cpu_lock); 16856 mutex_exit(&dtrace_provider_lock); 16857 mutex_exit(&dtrace_lock); 16858 return (DDI_FAILURE); 16859 } 16860 16861 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 16862 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 16863 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 16864 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 16865 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 16866 ddi_remove_minor_node(devi, NULL); 16867 ddi_soft_state_fini(&dtrace_softstate); 16868 mutex_exit(&cpu_lock); 16869 mutex_exit(&dtrace_provider_lock); 16870 mutex_exit(&dtrace_lock); 16871 return (DDI_FAILURE); 16872 } 16873 16874 ddi_report_dev(devi); 16875 dtrace_devi = devi; 16876 16877 dtrace_modload = dtrace_module_loaded; 16878 dtrace_modunload = dtrace_module_unloaded; 16879 dtrace_cpu_init = dtrace_cpu_setup_initial; 16880 dtrace_helpers_cleanup = dtrace_helpers_destroy; 16881 dtrace_helpers_fork = dtrace_helpers_duplicate; 16882 dtrace_cpustart_init = dtrace_suspend; 16883 dtrace_cpustart_fini = dtrace_resume; 16884 dtrace_debugger_init = dtrace_suspend; 16885 dtrace_debugger_fini = dtrace_resume; 16886 16887 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16888 16889 ASSERT(MUTEX_HELD(&cpu_lock)); 16890 16891 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 16892 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 16893 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 16894 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 16895 VM_SLEEP | VMC_IDENTIFIER); 16896 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 16897 1, INT_MAX, 0); 16898 16899 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 16900 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 16901 NULL, NULL, NULL, NULL, NULL, 0); 16902 16903 ASSERT(MUTEX_HELD(&cpu_lock)); 16904 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 16905 offsetof(dtrace_probe_t, dtpr_nextmod), 16906 offsetof(dtrace_probe_t, dtpr_prevmod)); 16907 16908 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 16909 offsetof(dtrace_probe_t, dtpr_nextfunc), 16910 offsetof(dtrace_probe_t, dtpr_prevfunc)); 16911 16912 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 16913 offsetof(dtrace_probe_t, dtpr_nextname), 16914 offsetof(dtrace_probe_t, dtpr_prevname)); 16915 16916 if (dtrace_retain_max < 1) { 16917 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 16918 "setting to 1", dtrace_retain_max); 16919 dtrace_retain_max = 1; 16920 } 16921 16922 /* 16923 * Now discover our toxic ranges. 
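 * (A toxic range is a region of the kernel address space that must
 * never be dereferenced from probe context; dtrace_toxic_ranges()
 * reports each such region via dtrace_toxrange_add(), above.)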
16924 */
16925 dtrace_toxic_ranges(dtrace_toxrange_add);
16926
16927 /*
16928 * Before we register ourselves as a provider to our own framework,
16929 * we would like to assert that dtrace_provider is NULL -- but that's
16930 * not true if we were loaded as a dependency of a DTrace provider.
16931 * Once we've registered, we can assert that dtrace_provider is our
16932 * pseudo provider.
16933 */
16934 (void) dtrace_register("dtrace", &dtrace_provider_attr,
16935 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
16936
16937 ASSERT(dtrace_provider != NULL);
16938 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
16939
16940 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16941 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16942 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16943 dtrace_provider, NULL, NULL, "END", 0, NULL);
16944 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16945 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16946
16947 dtrace_anon_property();
16948 mutex_exit(&cpu_lock);
16949
16950 /*
16951 * If there are already providers, we must ask them to provide their
16952 * probes, and then match any anonymous enabling against them. Note
16953 * that there should be no other retained enablings at this time:
16954 * the only retained enabling should be the anonymous
16955 * enabling.
16956 */
16957 if (dtrace_anon.dta_enabling != NULL) {
16958 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16959
16960 dtrace_enabling_provide(NULL);
16961 state = dtrace_anon.dta_state;
16962
16963 /*
16964 * We couldn't hold cpu_lock across the above call to
16965 * dtrace_enabling_provide(), but we must hold it to actually
16966 * enable the probes. We have to drop all of our locks, pick
16967 * up cpu_lock, and regain our locks before matching the
16968 * retained anonymous enabling.
16969 */
16970 mutex_exit(&dtrace_lock);
16971 mutex_exit(&dtrace_provider_lock);
16972
16973 mutex_enter(&cpu_lock);
16974 mutex_enter(&dtrace_provider_lock);
16975 mutex_enter(&dtrace_lock);
16976
16977 if ((enab = dtrace_anon.dta_enabling) != NULL)
16978 (void) dtrace_enabling_match(enab, NULL);
16979
16980 mutex_exit(&cpu_lock);
16981 }
16982
16983 mutex_exit(&dtrace_lock);
16984 mutex_exit(&dtrace_provider_lock);
16985
16986 if (state != NULL) {
16987 /*
16988 * If we created any anonymous state, set it going now.
16989 */
16990 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16991 }
16992
16993 return (DDI_SUCCESS);
16994 }
16995 #endif /* illumos */
16996
16997 #ifndef illumos
16998 static void dtrace_dtr(void *);
16999 #endif
17000
17001 /*ARGSUSED*/
17002 static int
17003 #ifdef illumos
17004 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
17005 #else
17006 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
17007 #endif
17008 {
17009 dtrace_state_t *state;
17010 uint32_t priv;
17011 uid_t uid;
17012 zoneid_t zoneid;
17013
17014 #ifdef illumos
17015 if (getminor(*devp) == DTRACEMNRN_HELPER)
17016 return (0);
17017
17018 /*
17019 * If this wasn't an open with the "helper" minor, then it must be
17020 * the "dtrace" minor.
17021 */
17022 if (getminor(*devp) != DTRACEMNRN_DTRACE)
17023 return (ENXIO);
17024 #else
17025 cred_t *cred_p = NULL;
17026 cred_p = dev->si_cred;
17027
17028 /*
17029 * If no DTRACE_PRIV_* bits are set in the credential, then the
17030 * caller lacks sufficient permission to do anything with DTrace.
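 * dtrace_cred2priv() condenses the credential into a DTRACE_PRIV_*
 * bit mask, along with the uid and zoneid that later gate which
 * probes this consumer may see.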
17031 */ 17032 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 17033 if (priv == DTRACE_PRIV_NONE) { 17034 #endif 17035 17036 return (EACCES); 17037 } 17038 17039 /* 17040 * Ask all providers to provide all their probes. 17041 */ 17042 mutex_enter(&dtrace_provider_lock); 17043 dtrace_probe_provide(NULL, NULL); 17044 mutex_exit(&dtrace_provider_lock); 17045 17046 mutex_enter(&cpu_lock); 17047 mutex_enter(&dtrace_lock); 17048 dtrace_opens++; 17049 dtrace_membar_producer(); 17050 17051 #ifdef illumos 17052 /* 17053 * If the kernel debugger is active (that is, if the kernel debugger 17054 * modified text in some way), we won't allow the open. 17055 */ 17056 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 17057 dtrace_opens--; 17058 mutex_exit(&cpu_lock); 17059 mutex_exit(&dtrace_lock); 17060 return (EBUSY); 17061 } 17062 17063 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { 17064 /* 17065 * If DTrace helper tracing is enabled, we need to allocate the 17066 * trace buffer and initialize the values. 17067 */ 17068 dtrace_helptrace_buffer = 17069 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 17070 dtrace_helptrace_next = 0; 17071 dtrace_helptrace_wrapped = 0; 17072 dtrace_helptrace_enable = 0; 17073 } 17074 17075 state = dtrace_state_create(devp, cred_p); 17076 #else 17077 state = dtrace_state_create(dev, NULL); 17078 devfs_set_cdevpriv(state, dtrace_dtr); 17079 #endif 17080 17081 mutex_exit(&cpu_lock); 17082 17083 if (state == NULL) { 17084 #ifdef illumos 17085 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17086 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17087 #else 17088 --dtrace_opens; 17089 #endif 17090 mutex_exit(&dtrace_lock); 17091 return (EAGAIN); 17092 } 17093 17094 mutex_exit(&dtrace_lock); 17095 17096 return (0); 17097 } 17098 17099 /*ARGSUSED*/ 17100 #ifdef illumos 17101 static int 17102 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 17103 #else 17104 static void 17105 dtrace_dtr(void *data) 17106 #endif 17107 { 17108 #ifdef illumos 17109 minor_t minor = getminor(dev); 17110 dtrace_state_t *state; 17111 #endif 17112 dtrace_helptrace_t *buf = NULL; 17113 17114 #ifdef illumos 17115 if (minor == DTRACEMNRN_HELPER) 17116 return (0); 17117 17118 state = ddi_get_soft_state(dtrace_softstate, minor); 17119 #else 17120 dtrace_state_t *state = data; 17121 #endif 17122 17123 mutex_enter(&cpu_lock); 17124 mutex_enter(&dtrace_lock); 17125 17126 #ifdef illumos 17127 if (state->dts_anon) 17128 #else 17129 if (state != NULL && state->dts_anon) 17130 #endif 17131 { 17132 /* 17133 * There is anonymous state. Destroy that first. 17134 */ 17135 ASSERT(dtrace_anon.dta_state == NULL); 17136 dtrace_state_destroy(state->dts_anon); 17137 } 17138 17139 if (dtrace_helptrace_disable) { 17140 /* 17141 * If we have been told to disable helper tracing, set the 17142 * buffer to NULL before calling into dtrace_state_destroy(); 17143 * we take advantage of its dtrace_sync() to know that no 17144 * CPU is in probe context with enabled helper tracing 17145 * after it returns. 17146 */ 17147 buf = dtrace_helptrace_buffer; 17148 dtrace_helptrace_buffer = NULL; 17149 } 17150 17151 #ifdef illumos 17152 dtrace_state_destroy(state); 17153 #else 17154 if (state != NULL) { 17155 dtrace_state_destroy(state); 17156 kmem_free(state, 0); 17157 } 17158 #endif 17159 ASSERT(dtrace_opens > 0); 17160 17161 #ifdef illumos 17162 /* 17163 * Only relinquish control of the kernel debugger interface when there 17164 * are no consumers and no anonymous enablings. 
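 * (This releases the claim taken via KDI_DTSET_DTRACE_ACTIVATE in
 * dtrace_open() when the first consumer arrived.)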
17165 */ 17166 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 17167 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17168 #else 17169 --dtrace_opens; 17170 #endif 17171 17172 if (buf != NULL) { 17173 kmem_free(buf, dtrace_helptrace_bufsize); 17174 dtrace_helptrace_disable = 0; 17175 } 17176 17177 mutex_exit(&dtrace_lock); 17178 mutex_exit(&cpu_lock); 17179 17180 #ifdef illumos 17181 return (0); 17182 #endif 17183 } 17184 17185 #ifdef illumos 17186 /*ARGSUSED*/ 17187 static int 17188 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 17189 { 17190 int rval; 17191 dof_helper_t help, *dhp = NULL; 17192 17193 switch (cmd) { 17194 case DTRACEHIOC_ADDDOF: 17195 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 17196 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 17197 return (EFAULT); 17198 } 17199 17200 dhp = &help; 17201 arg = (intptr_t)help.dofhp_dof; 17202 /*FALLTHROUGH*/ 17203 17204 case DTRACEHIOC_ADD: { 17205 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 17206 17207 if (dof == NULL) 17208 return (rval); 17209 17210 mutex_enter(&dtrace_lock); 17211 17212 /* 17213 * dtrace_helper_slurp() takes responsibility for the dof -- 17214 * it may free it now or it may save it and free it later. 17215 */ 17216 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 17217 *rv = rval; 17218 rval = 0; 17219 } else { 17220 rval = EINVAL; 17221 } 17222 17223 mutex_exit(&dtrace_lock); 17224 return (rval); 17225 } 17226 17227 case DTRACEHIOC_REMOVE: { 17228 mutex_enter(&dtrace_lock); 17229 rval = dtrace_helper_destroygen(NULL, arg); 17230 mutex_exit(&dtrace_lock); 17231 17232 return (rval); 17233 } 17234 17235 default: 17236 break; 17237 } 17238 17239 return (ENOTTY); 17240 } 17241 17242 /*ARGSUSED*/ 17243 static int 17244 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 17245 { 17246 minor_t minor = getminor(dev); 17247 dtrace_state_t *state; 17248 int rval; 17249 17250 if (minor == DTRACEMNRN_HELPER) 17251 return (dtrace_ioctl_helper(cmd, arg, rv)); 17252 17253 state = ddi_get_soft_state(dtrace_softstate, minor); 17254 17255 if (state->dts_anon) { 17256 ASSERT(dtrace_anon.dta_state == NULL); 17257 state = state->dts_anon; 17258 } 17259 17260 switch (cmd) { 17261 case DTRACEIOC_PROVIDER: { 17262 dtrace_providerdesc_t pvd; 17263 dtrace_provider_t *pvp; 17264 17265 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17266 return (EFAULT); 17267 17268 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17269 mutex_enter(&dtrace_provider_lock); 17270 17271 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17272 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17273 break; 17274 } 17275 17276 mutex_exit(&dtrace_provider_lock); 17277 17278 if (pvp == NULL) 17279 return (ESRCH); 17280 17281 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17282 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17283 17284 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17285 return (EFAULT); 17286 17287 return (0); 17288 } 17289 17290 case DTRACEIOC_EPROBE: { 17291 dtrace_eprobedesc_t epdesc; 17292 dtrace_ecb_t *ecb; 17293 dtrace_action_t *act; 17294 void *buf; 17295 size_t size; 17296 uintptr_t dest; 17297 int nrecs; 17298 17299 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17300 return (EFAULT); 17301 17302 mutex_enter(&dtrace_lock); 17303 17304 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17305 mutex_exit(&dtrace_lock); 17306 return (EINVAL); 17307 } 17308 17309 if (ecb->dte_probe == NULL) { 17310 
mutex_exit(&dtrace_lock); 17311 return (EINVAL); 17312 } 17313 17314 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17315 epdesc.dtepd_uarg = ecb->dte_uarg; 17316 epdesc.dtepd_size = ecb->dte_size; 17317 17318 nrecs = epdesc.dtepd_nrecs; 17319 epdesc.dtepd_nrecs = 0; 17320 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17321 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17322 continue; 17323 17324 epdesc.dtepd_nrecs++; 17325 } 17326 17327 /* 17328 * Now that we have the size, we need to allocate a temporary 17329 * buffer in which to store the complete description. We need 17330 * the temporary buffer to be able to drop dtrace_lock() 17331 * across the copyout(), below. 17332 */ 17333 size = sizeof (dtrace_eprobedesc_t) + 17334 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17335 17336 buf = kmem_alloc(size, KM_SLEEP); 17337 dest = (uintptr_t)buf; 17338 17339 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17340 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17341 17342 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17343 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17344 continue; 17345 17346 if (nrecs-- == 0) 17347 break; 17348 17349 bcopy(&act->dta_rec, (void *)dest, 17350 sizeof (dtrace_recdesc_t)); 17351 dest += sizeof (dtrace_recdesc_t); 17352 } 17353 17354 mutex_exit(&dtrace_lock); 17355 17356 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17357 kmem_free(buf, size); 17358 return (EFAULT); 17359 } 17360 17361 kmem_free(buf, size); 17362 return (0); 17363 } 17364 17365 case DTRACEIOC_AGGDESC: { 17366 dtrace_aggdesc_t aggdesc; 17367 dtrace_action_t *act; 17368 dtrace_aggregation_t *agg; 17369 int nrecs; 17370 uint32_t offs; 17371 dtrace_recdesc_t *lrec; 17372 void *buf; 17373 size_t size; 17374 uintptr_t dest; 17375 17376 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17377 return (EFAULT); 17378 17379 mutex_enter(&dtrace_lock); 17380 17381 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17382 mutex_exit(&dtrace_lock); 17383 return (EINVAL); 17384 } 17385 17386 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17387 17388 nrecs = aggdesc.dtagd_nrecs; 17389 aggdesc.dtagd_nrecs = 0; 17390 17391 offs = agg->dtag_base; 17392 lrec = &agg->dtag_action.dta_rec; 17393 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17394 17395 for (act = agg->dtag_first; ; act = act->dta_next) { 17396 ASSERT(act->dta_intuple || 17397 DTRACEACT_ISAGG(act->dta_kind)); 17398 17399 /* 17400 * If this action has a record size of zero, it 17401 * denotes an argument to the aggregating action. 17402 * Because the presence of this record doesn't (or 17403 * shouldn't) affect the way the data is interpreted, 17404 * we don't copy it out to save user-level the 17405 * confusion of dealing with a zero-length record. 17406 */ 17407 if (act->dta_rec.dtrd_size == 0) { 17408 ASSERT(agg->dtag_hasarg); 17409 continue; 17410 } 17411 17412 aggdesc.dtagd_nrecs++; 17413 17414 if (act == &agg->dtag_action) 17415 break; 17416 } 17417 17418 /* 17419 * Now that we have the size, we need to allocate a temporary 17420 * buffer in which to store the complete description. We need 17421 * the temporary buffer to be able to drop dtrace_lock() 17422 * across the copyout(), below. 
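 * (copyout() may fault on the user buffer and block; we would rather
 * not hold dtrace_lock across that wait.)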
17423 */ 17424 size = sizeof (dtrace_aggdesc_t) + 17425 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17426 17427 buf = kmem_alloc(size, KM_SLEEP); 17428 dest = (uintptr_t)buf; 17429 17430 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17431 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17432 17433 for (act = agg->dtag_first; ; act = act->dta_next) { 17434 dtrace_recdesc_t rec = act->dta_rec; 17435 17436 /* 17437 * See the comment in the above loop for why we pass 17438 * over zero-length records. 17439 */ 17440 if (rec.dtrd_size == 0) { 17441 ASSERT(agg->dtag_hasarg); 17442 continue; 17443 } 17444 17445 if (nrecs-- == 0) 17446 break; 17447 17448 rec.dtrd_offset -= offs; 17449 bcopy(&rec, (void *)dest, sizeof (rec)); 17450 dest += sizeof (dtrace_recdesc_t); 17451 17452 if (act == &agg->dtag_action) 17453 break; 17454 } 17455 17456 mutex_exit(&dtrace_lock); 17457 17458 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17459 kmem_free(buf, size); 17460 return (EFAULT); 17461 } 17462 17463 kmem_free(buf, size); 17464 return (0); 17465 } 17466 17467 case DTRACEIOC_ENABLE: { 17468 dof_hdr_t *dof; 17469 dtrace_enabling_t *enab = NULL; 17470 dtrace_vstate_t *vstate; 17471 int err = 0; 17472 17473 *rv = 0; 17474 17475 /* 17476 * If a NULL argument has been passed, we take this as our 17477 * cue to reevaluate our enablings. 17478 */ 17479 if (arg == NULL) { 17480 dtrace_enabling_matchall(); 17481 17482 return (0); 17483 } 17484 17485 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17486 return (rval); 17487 17488 mutex_enter(&cpu_lock); 17489 mutex_enter(&dtrace_lock); 17490 vstate = &state->dts_vstate; 17491 17492 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17493 mutex_exit(&dtrace_lock); 17494 mutex_exit(&cpu_lock); 17495 dtrace_dof_destroy(dof); 17496 return (EBUSY); 17497 } 17498 17499 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17500 mutex_exit(&dtrace_lock); 17501 mutex_exit(&cpu_lock); 17502 dtrace_dof_destroy(dof); 17503 return (EINVAL); 17504 } 17505 17506 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17507 dtrace_enabling_destroy(enab); 17508 mutex_exit(&dtrace_lock); 17509 mutex_exit(&cpu_lock); 17510 dtrace_dof_destroy(dof); 17511 return (rval); 17512 } 17513 17514 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17515 err = dtrace_enabling_retain(enab); 17516 } else { 17517 dtrace_enabling_destroy(enab); 17518 } 17519 17520 mutex_exit(&cpu_lock); 17521 mutex_exit(&dtrace_lock); 17522 dtrace_dof_destroy(dof); 17523 17524 return (err); 17525 } 17526 17527 case DTRACEIOC_REPLICATE: { 17528 dtrace_repldesc_t desc; 17529 dtrace_probedesc_t *match = &desc.dtrpd_match; 17530 dtrace_probedesc_t *create = &desc.dtrpd_create; 17531 int err; 17532 17533 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17534 return (EFAULT); 17535 17536 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17537 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17538 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17539 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17540 17541 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17542 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17543 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17544 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17545 17546 mutex_enter(&dtrace_lock); 17547 err = dtrace_enabling_replicate(state, match, create); 17548 mutex_exit(&dtrace_lock); 17549 17550 return (err); 17551 } 17552 17553 case DTRACEIOC_PROBEMATCH: 17554 case DTRACEIOC_PROBES: { 17555 dtrace_probe_t *probe = 
NULL; 17556 dtrace_probedesc_t desc; 17557 dtrace_probekey_t pkey; 17558 dtrace_id_t i; 17559 int m = 0; 17560 uint32_t priv; 17561 uid_t uid; 17562 zoneid_t zoneid; 17563 17564 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17565 return (EFAULT); 17566 17567 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17568 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17569 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17570 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17571 17572 /* 17573 * Before we attempt to match this probe, we want to give 17574 * all providers the opportunity to provide it. 17575 */ 17576 if (desc.dtpd_id == DTRACE_IDNONE) { 17577 mutex_enter(&dtrace_provider_lock); 17578 dtrace_probe_provide(&desc, NULL); 17579 mutex_exit(&dtrace_provider_lock); 17580 desc.dtpd_id++; 17581 } 17582 17583 if (cmd == DTRACEIOC_PROBEMATCH) { 17584 dtrace_probekey(&desc, &pkey); 17585 pkey.dtpk_id = DTRACE_IDNONE; 17586 } 17587 17588 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17589 17590 mutex_enter(&dtrace_lock); 17591 17592 if (cmd == DTRACEIOC_PROBEMATCH) { 17593 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17594 if ((probe = dtrace_probes[i - 1]) != NULL && 17595 (m = dtrace_match_probe(probe, &pkey, 17596 priv, uid, zoneid)) != 0) 17597 break; 17598 } 17599 17600 if (m < 0) { 17601 mutex_exit(&dtrace_lock); 17602 return (EINVAL); 17603 } 17604 17605 } else { 17606 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17607 if ((probe = dtrace_probes[i - 1]) != NULL && 17608 dtrace_match_priv(probe, priv, uid, zoneid)) 17609 break; 17610 } 17611 } 17612 17613 if (probe == NULL) { 17614 mutex_exit(&dtrace_lock); 17615 return (ESRCH); 17616 } 17617 17618 dtrace_probe_description(probe, &desc); 17619 mutex_exit(&dtrace_lock); 17620 17621 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17622 return (EFAULT); 17623 17624 return (0); 17625 } 17626 17627 case DTRACEIOC_PROBEARG: { 17628 dtrace_argdesc_t desc; 17629 dtrace_probe_t *probe; 17630 dtrace_provider_t *prov; 17631 17632 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17633 return (EFAULT); 17634 17635 if (desc.dtargd_id == DTRACE_IDNONE) 17636 return (EINVAL); 17637 17638 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17639 return (EINVAL); 17640 17641 mutex_enter(&dtrace_provider_lock); 17642 mutex_enter(&mod_lock); 17643 mutex_enter(&dtrace_lock); 17644 17645 if (desc.dtargd_id > dtrace_nprobes) { 17646 mutex_exit(&dtrace_lock); 17647 mutex_exit(&mod_lock); 17648 mutex_exit(&dtrace_provider_lock); 17649 return (EINVAL); 17650 } 17651 17652 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17653 mutex_exit(&dtrace_lock); 17654 mutex_exit(&mod_lock); 17655 mutex_exit(&dtrace_provider_lock); 17656 return (EINVAL); 17657 } 17658 17659 mutex_exit(&dtrace_lock); 17660 17661 prov = probe->dtpr_provider; 17662 17663 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17664 /* 17665 * There isn't any typed information for this probe. 17666 * Set the argument number to DTRACE_ARGNONE. 
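 * (dtps_getargdesc is an optional provider entry point; providers
 * without typed argument information simply leave it NULL.)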
17667 */ 17668 desc.dtargd_ndx = DTRACE_ARGNONE; 17669 } else { 17670 desc.dtargd_native[0] = '\0'; 17671 desc.dtargd_xlate[0] = '\0'; 17672 desc.dtargd_mapping = desc.dtargd_ndx; 17673 17674 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17675 probe->dtpr_id, probe->dtpr_arg, &desc); 17676 } 17677 17678 mutex_exit(&mod_lock); 17679 mutex_exit(&dtrace_provider_lock); 17680 17681 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17682 return (EFAULT); 17683 17684 return (0); 17685 } 17686 17687 case DTRACEIOC_GO: { 17688 processorid_t cpuid; 17689 rval = dtrace_state_go(state, &cpuid); 17690 17691 if (rval != 0) 17692 return (rval); 17693 17694 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17695 return (EFAULT); 17696 17697 return (0); 17698 } 17699 17700 case DTRACEIOC_STOP: { 17701 processorid_t cpuid; 17702 17703 mutex_enter(&dtrace_lock); 17704 rval = dtrace_state_stop(state, &cpuid); 17705 mutex_exit(&dtrace_lock); 17706 17707 if (rval != 0) 17708 return (rval); 17709 17710 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17711 return (EFAULT); 17712 17713 return (0); 17714 } 17715 17716 case DTRACEIOC_DOFGET: { 17717 dof_hdr_t hdr, *dof; 17718 uint64_t len; 17719 17720 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17721 return (EFAULT); 17722 17723 mutex_enter(&dtrace_lock); 17724 dof = dtrace_dof_create(state); 17725 mutex_exit(&dtrace_lock); 17726 17727 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17728 rval = copyout(dof, (void *)arg, len); 17729 dtrace_dof_destroy(dof); 17730 17731 return (rval == 0 ? 0 : EFAULT); 17732 } 17733 17734 case DTRACEIOC_AGGSNAP: 17735 case DTRACEIOC_BUFSNAP: { 17736 dtrace_bufdesc_t desc; 17737 caddr_t cached; 17738 dtrace_buffer_t *buf; 17739 17740 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17741 return (EFAULT); 17742 17743 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17744 return (EINVAL); 17745 17746 mutex_enter(&dtrace_lock); 17747 17748 if (cmd == DTRACEIOC_BUFSNAP) { 17749 buf = &state->dts_buffer[desc.dtbd_cpu]; 17750 } else { 17751 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17752 } 17753 17754 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17755 size_t sz = buf->dtb_offset; 17756 17757 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17758 mutex_exit(&dtrace_lock); 17759 return (EBUSY); 17760 } 17761 17762 /* 17763 * If this buffer has already been consumed, we're 17764 * going to indicate that there's nothing left here 17765 * to consume. 17766 */ 17767 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17768 mutex_exit(&dtrace_lock); 17769 17770 desc.dtbd_size = 0; 17771 desc.dtbd_drops = 0; 17772 desc.dtbd_errors = 0; 17773 desc.dtbd_oldest = 0; 17774 sz = sizeof (desc); 17775 17776 if (copyout(&desc, (void *)arg, sz) != 0) 17777 return (EFAULT); 17778 17779 return (0); 17780 } 17781 17782 /* 17783 * If this is a ring buffer that has wrapped, we want 17784 * to copy the whole thing out. 
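 * (Roughly, dtrace_buffer_polish() zero-fills the unconsumed gap in
 * the wrapped ring so that the consumer can decode the buffer from
 * its oldest record onward.)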
17785 */ 17786 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17787 dtrace_buffer_polish(buf); 17788 sz = buf->dtb_size; 17789 } 17790 17791 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17792 mutex_exit(&dtrace_lock); 17793 return (EFAULT); 17794 } 17795 17796 desc.dtbd_size = sz; 17797 desc.dtbd_drops = buf->dtb_drops; 17798 desc.dtbd_errors = buf->dtb_errors; 17799 desc.dtbd_oldest = buf->dtb_xamot_offset; 17800 desc.dtbd_timestamp = dtrace_gethrtime(); 17801 17802 mutex_exit(&dtrace_lock); 17803 17804 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17805 return (EFAULT); 17806 17807 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17808 17809 return (0); 17810 } 17811 17812 if (buf->dtb_tomax == NULL) { 17813 ASSERT(buf->dtb_xamot == NULL); 17814 mutex_exit(&dtrace_lock); 17815 return (ENOENT); 17816 } 17817 17818 cached = buf->dtb_tomax; 17819 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 17820 17821 dtrace_xcall(desc.dtbd_cpu, 17822 (dtrace_xcall_t)dtrace_buffer_switch, buf); 17823 17824 state->dts_errors += buf->dtb_xamot_errors; 17825 17826 /* 17827 * If the buffers did not actually switch, then the cross call 17828 * did not take place -- presumably because the given CPU is 17829 * not in the ready set. If this is the case, we'll return 17830 * ENOENT. 17831 */ 17832 if (buf->dtb_tomax == cached) { 17833 ASSERT(buf->dtb_xamot != cached); 17834 mutex_exit(&dtrace_lock); 17835 return (ENOENT); 17836 } 17837 17838 ASSERT(cached == buf->dtb_xamot); 17839 17840 /* 17841 * We have our snapshot; now copy it out. 17842 */ 17843 if (copyout(buf->dtb_xamot, desc.dtbd_data, 17844 buf->dtb_xamot_offset) != 0) { 17845 mutex_exit(&dtrace_lock); 17846 return (EFAULT); 17847 } 17848 17849 desc.dtbd_size = buf->dtb_xamot_offset; 17850 desc.dtbd_drops = buf->dtb_xamot_drops; 17851 desc.dtbd_errors = buf->dtb_xamot_errors; 17852 desc.dtbd_oldest = 0; 17853 desc.dtbd_timestamp = buf->dtb_switched; 17854 17855 mutex_exit(&dtrace_lock); 17856 17857 /* 17858 * Finally, copy out the buffer description. 17859 */ 17860 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17861 return (EFAULT); 17862 17863 return (0); 17864 } 17865 17866 case DTRACEIOC_CONF: { 17867 dtrace_conf_t conf; 17868 17869 bzero(&conf, sizeof (conf)); 17870 conf.dtc_difversion = DIF_VERSION; 17871 conf.dtc_difintregs = DIF_DIR_NREGS; 17872 conf.dtc_diftupregs = DIF_DTR_NREGS; 17873 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 17874 17875 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 17876 return (EFAULT); 17877 17878 return (0); 17879 } 17880 17881 case DTRACEIOC_STATUS: { 17882 dtrace_status_t stat; 17883 dtrace_dstate_t *dstate; 17884 int i, j; 17885 uint64_t nerrs; 17886 17887 /* 17888 * See the comment in dtrace_state_deadman() for the reason 17889 * for setting dts_laststatus to INT64_MAX before setting 17890 * it to the correct value. 
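 * (In short: without an atomic 64-bit store, the value is first set
 * to INT64_MAX to denote that it is being altered, and then set to
 * the correct value after the producer barrier.)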
17891 */ 17892 state->dts_laststatus = INT64_MAX; 17893 dtrace_membar_producer(); 17894 state->dts_laststatus = dtrace_gethrtime(); 17895 17896 bzero(&stat, sizeof (stat)); 17897 17898 mutex_enter(&dtrace_lock); 17899 17900 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 17901 mutex_exit(&dtrace_lock); 17902 return (ENOENT); 17903 } 17904 17905 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 17906 stat.dtst_exiting = 1; 17907 17908 nerrs = state->dts_errors; 17909 dstate = &state->dts_vstate.dtvs_dynvars; 17910 17911 for (i = 0; i < NCPU; i++) { 17912 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 17913 17914 stat.dtst_dyndrops += dcpu->dtdsc_drops; 17915 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 17916 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 17917 17918 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 17919 stat.dtst_filled++; 17920 17921 nerrs += state->dts_buffer[i].dtb_errors; 17922 17923 for (j = 0; j < state->dts_nspeculations; j++) { 17924 dtrace_speculation_t *spec; 17925 dtrace_buffer_t *buf; 17926 17927 spec = &state->dts_speculations[j]; 17928 buf = &spec->dtsp_buffer[i]; 17929 stat.dtst_specdrops += buf->dtb_xamot_drops; 17930 } 17931 } 17932 17933 stat.dtst_specdrops_busy = state->dts_speculations_busy; 17934 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 17935 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 17936 stat.dtst_dblerrors = state->dts_dblerrors; 17937 stat.dtst_killed = 17938 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 17939 stat.dtst_errors = nerrs; 17940 17941 mutex_exit(&dtrace_lock); 17942 17943 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 17944 return (EFAULT); 17945 17946 return (0); 17947 } 17948 17949 case DTRACEIOC_FORMAT: { 17950 dtrace_fmtdesc_t fmt; 17951 char *str; 17952 int len; 17953 17954 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 17955 return (EFAULT); 17956 17957 mutex_enter(&dtrace_lock); 17958 17959 if (fmt.dtfd_format == 0 || 17960 fmt.dtfd_format > state->dts_nformats) { 17961 mutex_exit(&dtrace_lock); 17962 return (EINVAL); 17963 } 17964 17965 /* 17966 * Format strings are allocated contiguously and they are 17967 * never freed; if a format index is less than the number 17968 * of formats, we can assert that the format map is non-NULL 17969 * and that the format for the specified index is non-NULL. 
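 * (Formats are presumably added when a format-bearing ECB action is
 * created and are torn down only with the state itself, so the index
 * remains stable for the lifetime of this consumer.)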
17970 */
17971 ASSERT(state->dts_formats != NULL);
17972 str = state->dts_formats[fmt.dtfd_format - 1];
17973 ASSERT(str != NULL);
17974
17975 len = strlen(str) + 1;
17976
17977 if (len > fmt.dtfd_length) {
17978 fmt.dtfd_length = len;
17979
17980 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
17981 mutex_exit(&dtrace_lock);
17982 return (EINVAL);
17983 }
17984 } else {
17985 if (copyout(str, fmt.dtfd_string, len) != 0) {
17986 mutex_exit(&dtrace_lock);
17987 return (EINVAL);
17988 }
17989 }
17990
17991 mutex_exit(&dtrace_lock);
17992 return (0);
17993 }
17994
17995 default:
17996 break;
17997 }
17998
17999 return (ENOTTY);
18000 }
18001
18002 /*ARGSUSED*/
18003 static int
18004 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
18005 {
18006 dtrace_state_t *state;
18007
18008 switch (cmd) {
18009 case DDI_DETACH:
18010 break;
18011
18012 case DDI_SUSPEND:
18013 return (DDI_SUCCESS);
18014
18015 default:
18016 return (DDI_FAILURE);
18017 }
18018
18019 mutex_enter(&cpu_lock);
18020 mutex_enter(&dtrace_provider_lock);
18021 mutex_enter(&dtrace_lock);
18022
18023 ASSERT(dtrace_opens == 0);
18024
18025 if (dtrace_helpers > 0) {
18026 mutex_exit(&dtrace_provider_lock);
18027 mutex_exit(&dtrace_lock);
18028 mutex_exit(&cpu_lock);
18029 return (DDI_FAILURE);
18030 }
18031
18032 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
18033 mutex_exit(&dtrace_provider_lock);
18034 mutex_exit(&dtrace_lock);
18035 mutex_exit(&cpu_lock);
18036 return (DDI_FAILURE);
18037 }
18038
18039 dtrace_provider = NULL;
18040
18041 if ((state = dtrace_anon_grab()) != NULL) {
18042 /*
18043 * If there were ECBs on this state, the provider should
18044 * not have been allowed to detach; assert that there are
18045 * none.
18046 */
18047 ASSERT(state->dts_necbs == 0);
18048 dtrace_state_destroy(state);
18049
18050 /*
18051 * If we're being detached with anonymous state, we need to
18052 * indicate to the kernel debugger that DTrace is now inactive.
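 * (This parallels the last-close KDI_DTSET_DTRACE_DEACTIVATE in
 * dtrace_close()/dtrace_dtr(), above.)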
18053 */ 18054 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 18055 } 18056 18057 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 18058 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 18059 dtrace_cpu_init = NULL; 18060 dtrace_helpers_cleanup = NULL; 18061 dtrace_helpers_fork = NULL; 18062 dtrace_cpustart_init = NULL; 18063 dtrace_cpustart_fini = NULL; 18064 dtrace_debugger_init = NULL; 18065 dtrace_debugger_fini = NULL; 18066 dtrace_modload = NULL; 18067 dtrace_modunload = NULL; 18068 18069 ASSERT(dtrace_getf == 0); 18070 ASSERT(dtrace_closef == NULL); 18071 18072 mutex_exit(&cpu_lock); 18073 18074 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 18075 dtrace_probes = NULL; 18076 dtrace_nprobes = 0; 18077 18078 dtrace_hash_destroy(dtrace_bymod); 18079 dtrace_hash_destroy(dtrace_byfunc); 18080 dtrace_hash_destroy(dtrace_byname); 18081 dtrace_bymod = NULL; 18082 dtrace_byfunc = NULL; 18083 dtrace_byname = NULL; 18084 18085 kmem_cache_destroy(dtrace_state_cache); 18086 vmem_destroy(dtrace_minor); 18087 vmem_destroy(dtrace_arena); 18088 18089 if (dtrace_toxrange != NULL) { 18090 kmem_free(dtrace_toxrange, 18091 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 18092 dtrace_toxrange = NULL; 18093 dtrace_toxranges = 0; 18094 dtrace_toxranges_max = 0; 18095 } 18096 18097 ddi_remove_minor_node(dtrace_devi, NULL); 18098 dtrace_devi = NULL; 18099 18100 ddi_soft_state_fini(&dtrace_softstate); 18101 18102 ASSERT(dtrace_vtime_references == 0); 18103 ASSERT(dtrace_opens == 0); 18104 ASSERT(dtrace_retained == NULL); 18105 18106 mutex_exit(&dtrace_lock); 18107 mutex_exit(&dtrace_provider_lock); 18108 18109 /* 18110 * We don't destroy the task queue until after we have dropped our 18111 * locks (taskq_destroy() may block on running tasks). To prevent 18112 * attempting to do work after we have effectively detached but before 18113 * the task queue has been destroyed, all tasks dispatched via the 18114 * task queue must check that DTrace is still attached before 18115 * performing any operation. 
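 * (In particular, a dtrace_enabling_matchall() dispatched from
 * dtrace_module_loaded() may still be pending; taskq_destroy() will
 * wait for it to complete.)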
18116 */ 18117 taskq_destroy(dtrace_taskq); 18118 dtrace_taskq = NULL; 18119 18120 return (DDI_SUCCESS); 18121 } 18122 #endif 18123 18124 #ifdef illumos 18125 /*ARGSUSED*/ 18126 static int 18127 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 18128 { 18129 int error; 18130 18131 switch (infocmd) { 18132 case DDI_INFO_DEVT2DEVINFO: 18133 *result = (void *)dtrace_devi; 18134 error = DDI_SUCCESS; 18135 break; 18136 case DDI_INFO_DEVT2INSTANCE: 18137 *result = (void *)0; 18138 error = DDI_SUCCESS; 18139 break; 18140 default: 18141 error = DDI_FAILURE; 18142 } 18143 return (error); 18144 } 18145 #endif 18146 18147 #ifdef illumos 18148 static struct cb_ops dtrace_cb_ops = { 18149 dtrace_open, /* open */ 18150 dtrace_close, /* close */ 18151 nulldev, /* strategy */ 18152 nulldev, /* print */ 18153 nodev, /* dump */ 18154 nodev, /* read */ 18155 nodev, /* write */ 18156 dtrace_ioctl, /* ioctl */ 18157 nodev, /* devmap */ 18158 nodev, /* mmap */ 18159 nodev, /* segmap */ 18160 nochpoll, /* poll */ 18161 ddi_prop_op, /* cb_prop_op */ 18162 0, /* streamtab */ 18163 D_NEW | D_MP /* Driver compatibility flag */ 18164 }; 18165 18166 static struct dev_ops dtrace_ops = { 18167 DEVO_REV, /* devo_rev */ 18168 0, /* refcnt */ 18169 dtrace_info, /* get_dev_info */ 18170 nulldev, /* identify */ 18171 nulldev, /* probe */ 18172 dtrace_attach, /* attach */ 18173 dtrace_detach, /* detach */ 18174 nodev, /* reset */ 18175 &dtrace_cb_ops, /* driver operations */ 18176 NULL, /* bus operations */ 18177 nodev /* dev power */ 18178 }; 18179 18180 static struct modldrv modldrv = { 18181 &mod_driverops, /* module type (this is a pseudo driver) */ 18182 "Dynamic Tracing", /* name of module */ 18183 &dtrace_ops, /* driver ops */ 18184 }; 18185 18186 static struct modlinkage modlinkage = { 18187 MODREV_1, 18188 (void *)&modldrv, 18189 NULL 18190 }; 18191 18192 int 18193 _init(void) 18194 { 18195 return (mod_install(&modlinkage)); 18196 } 18197 18198 int 18199 _info(struct modinfo *modinfop) 18200 { 18201 return (mod_info(&modlinkage, modinfop)); 18202 } 18203 18204 int 18205 _fini(void) 18206 { 18207 return (mod_remove(&modlinkage)); 18208 } 18209 #else 18210 18211 static d_ioctl_t dtrace_ioctl; 18212 static d_ioctl_t dtrace_ioctl_helper; 18213 static void dtrace_load(void *); 18214 static int dtrace_unload(void); 18215 static struct cdev *dtrace_dev; 18216 static struct cdev *helper_dev; 18217 18218 void dtrace_invop_init(void); 18219 void dtrace_invop_uninit(void); 18220 18221 static struct cdevsw dtrace_cdevsw = { 18222 .d_version = D_VERSION, 18223 .d_ioctl = dtrace_ioctl, 18224 .d_open = dtrace_open, 18225 .d_name = "dtrace", 18226 }; 18227 18228 static struct cdevsw helper_cdevsw = { 18229 .d_version = D_VERSION, 18230 .d_ioctl = dtrace_ioctl_helper, 18231 .d_name = "helper", 18232 }; 18233 18234 #include <dtrace_anon.c> 18235 #include <dtrace_ioctl.c> 18236 #include <dtrace_load.c> 18237 #include <dtrace_modevent.c> 18238 #include <dtrace_sysctl.c> 18239 #include <dtrace_unload.c> 18240 #include <dtrace_vtime.c> 18241 #include <dtrace_hacks.c> 18242 #include <dtrace_isa.c> 18243 18244 SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); 18245 SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); 18246 SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); 18247 18248 DEV_MODULE(dtrace, dtrace_modevent, NULL); 18249 MODULE_VERSION(dtrace, 1); 18250 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); 18251 #endif 18252