/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#ifndef illumos
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#ifdef illumos
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#ifdef illumos
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#ifdef illumos
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#ifdef illumos
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#ifdef illumos
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#ifdef illumos
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "strtolctype.h"

/* FreeBSD includes: */
#ifndef illumos
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
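/*
 * As another example in the same pattern (the value here is hypothetical,
 * chosen only for illustration), the DOF size limit below could be raised
 * to 16MB with:
 *
 *   set dtrace:dtrace_dof_maxsize = 0x1000000
 */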
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (8 * 1024 * 1024);
size_t		dtrace_statvar_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = MSEC2NSEC(500);		/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
#ifndef illumos
int		dtrace_memstr_max = 4096;
#endif

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#ifdef illumos
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#ifdef illumos
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static int		dtrace_getf;		/* number of unpriv getf()s */
#ifdef illumos
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_genid_t	dtrace_retained_gen;	/* current retained enab gen */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
static int		dtrace_dynvar_failclean; /* dynvars failed to clean */
#ifndef illumos
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
static eventhandler_tag	dtrace_kld_load_tag;
static eventhandler_tag	dtrace_kld_unload_try_tag;
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
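/*
 * As an illustrative sketch (not an actual code path in this file), a
 * hypothetical operation that needed all of the above would acquire and
 * release the locks in this order:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&cpu_lock);
 *	mutex_exit(&dtrace_meta_lock);
 */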
#ifndef illumos
/* XXX FreeBSD hacks. */
#define	cr_suid		cr_svuid
#define	cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define	mod_modname	pathname
#define	vuprintf	vprintf
#define	ttoproc(_a)	((_a)->td_proc)
#define	crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define	SNOCD		0
#define	CPU_ON_INTR(_a)	0

#define	PRIV_EFFECTIVE		(1 << 0)
#define	PRIV_DTRACE_KERNEL	(1 << 1)
#define	PRIV_DTRACE_PROC	(1 << 2)
#define	PRIV_DTRACE_USER	(1 << 3)
#define	PRIV_PROC_OWNER		(1 << 4)
#define	PRIV_PROC_ZONE		(1 << 5)
#define	PRIV_ALL		~0

SYSCTL_DECL(_debug_dtrace);
SYSCTL_DECL(_kern_dtrace);
#endif

#ifdef illumos
#define	curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 *
 * These variables should be set dynamically to enable helper tracing.  The
 * only variables that should be set are dtrace_helptrace_enable (which should
 * be set to a non-zero value to allocate helper tracing buffers on the next
 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
 * non-zero value to deallocate helper tracing buffers on the next close of
 * /dev/dtrace).  When (and only when) helper tracing is disabled, the
 * buffer size may also be set via dtrace_helptrace_bufsize.
 */
int			dtrace_helptrace_enable = 0;
int			dtrace_helptrace_disable = 0;
int			dtrace_helptrace_bufsize = 16 * 1024 * 1024;
uint32_t		dtrace_helptrace_nlocals;
static dtrace_helptrace_t *dtrace_helptrace_buffer;
static uint32_t		dtrace_helptrace_next = 0;
static int		dtrace_helptrace_wrapped = 0;

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
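/*
 * A concrete sketch of the layout (values hypothetical): a thread with
 * t_did 42 and no active interrupts above LOCK_LEVEL would get the key
 *
 *	((42 + DIF_VARIABLE_MAX) & (((uint64_t)1 << 61) - 1)) |
 *	    ((uint64_t)0 << 61)
 *
 * i.e. the thread identifier occupies the low 61 bits and the interrupt
 * depth the high 3 bits, exactly as the macros below compute it.
 */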
#ifdef illumos
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz)		\
	((testaddr) - (uintptr_t)(baseaddr) < (basesz) &&		\
	(testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) &&	\
	(testaddr) + (testsz) >= (testaddr))

/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))
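/*
 * Illustrative boundary cases for the check above (addresses hypothetical):
 * with baseaddr = 0x1000 and basesz = 0x100, DTRACE_INRANGE(0x1000, 0x100,
 * ...) and DTRACE_INRANGE(0x10ff, 1, ...) hold, while DTRACE_INRANGE(0x1000,
 * 0x101, ...) does not; a testaddr plus testsz that would wrap past the top
 * of the address space is rejected by the final clause.
 */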
#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
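/*
 * For example (a sketch, not a call site in this file): probe-context code
 * that must dereference an untrusted pointer p should use
 *
 *	uint64_t v = dtrace_load64((uintptr_t)p);
 *
 * rather than *(uint64_t *)p, so that toxic ranges are refused and any
 * fault is recorded in the CPU's cpuc_dtrace_flags rather than panicking
 * the system.
 */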
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
#ifdef __FreeBSD__
	vpanic(format, alist);
#else
	dtrace_vpanic(format, alist);
#endif
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}
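/*
 * A worked example for the check above (values hypothetical): with
 * dtms_scratch_base = 0x1000 and dtms_scratch_ptr = 0x1040,
 * dtrace_inscratch(0x1010, 0x30, mstate) succeeds (the region ends exactly
 * at the scratch pointer), dtrace_inscratch(0x1010, 0x31, mstate) fails,
 * and a size large enough to wrap dest + size is caught by the middle test.
 */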
static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;
	size_t maxglobalsize, maxlocalsize;

	if (nsvars == 0)
		return (0);

	maxglobalsize = dtrace_statvar_maxsize;
	maxlocalsize = (maxglobalsize + sizeof (uint64_t)) * NCPU;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];
		uint8_t scope;
		size_t size;

		if (svar == NULL || (size = svar->dtsv_size) == 0)
			continue;

		scope = svar->dtsv_var.dtdv_scope;

		/*
		 * We verify that our size is valid in the spirit of providing
		 * defense in depth: we want to prevent attackers from using
		 * DTrace to escalate an orthogonal kernel heap corruption bug
		 * into the ability to store to arbitrary locations in memory.
		 */
		VERIFY((scope == DIFV_SCOPE_GLOBAL && size < maxglobalsize) ||
		    (scope == DIFV_SCOPE_LOCAL && size < maxlocalsize));

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
	file_t *fp;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	if (vstate->dtvs_state != NULL &&
	    dtrace_priv_proc(vstate->dtvs_state)) {
		proc_t *p;

		/*
		 * When we have privileges to the current process, there are
		 * several context-related kernel structures that are safe to
		 * read, even absent the privilege to read from kernel memory.
		 * These reads are safe because these structures contain only
		 * state that (1) we're permitted to read, (2) is harmless or
		 * (3) contains pointers to additional kernel state that we're
		 * not permitted to read (and as such, do not present an
		 * opportunity for privilege escalation).  Finally (and
		 * critically), because of the nature of their relation with
		 * the current thread context, the memory associated with these
		 * structures cannot change over the duration of probe context,
		 * and it is therefore impossible for this memory to be
		 * deallocated and reallocated as something else while it's
		 * being operated upon.
		 */
		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
			return (1);

		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
		    sz, curthread->t_procp, sizeof (proc_t))) {
			return (1);
		}

		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cred, sizeof (cred_t))) {
			return (1);
		}

#ifdef illumos
		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
			return (1);
		}

		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
			return (1);
		}
#endif
	}
	if ((fp = mstate->dtms_getf) != NULL) {
		uintptr_t psz = sizeof (void *);
		vnode_t *vp;
		vnodeops_t *op;

		/*
		 * When getf() returns a file_t, the enabling is implicitly
		 * granted the (transient) right to read the returned file_t
		 * as well as the v_path and v_op->vnop_name of the underlying
		 * vnode.  These accesses are allowed after a successful
		 * getf() because the members that they refer to cannot change
		 * once set -- and the barrier logic in the kernel's closef()
		 * path assures that the file_t and its referenced vnode_t
		 * cannot themselves be stale (that is, it is impossible for
		 * either dtms_getf itself or its f_vnode member to reference
		 * freed memory).
		 */
		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t)))
			return (1);

		if ((vp = fp->f_vnode) != NULL) {
#ifdef illumos
			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz))
				return (1);
			if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz,
			    vp->v_path, strlen(vp->v_path) + 1)) {
				return (1);
			}
#endif

			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz))
				return (1);

#ifdef illumos
			if ((op = vp->v_op) != NULL &&
			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
				return (1);
			}

			if (op != NULL && op->vnop_name != NULL &&
			    DTRACE_INRANGE(addr, sz, op->vnop_name,
			    strlen(op->vnop_name) + 1)) {
				return (1);
			}
#endif
		}
	}

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}
/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Convert a string to a signed integer using safe loads.
 *
 * NOTE: This function uses various macros from strtolctype.h to manipulate
 * digit values, etc -- these have all been checked to ensure they make
 * no additional function calls.
 */
static int64_t
dtrace_strtoll(char *input, int base, size_t limit)
{
	uintptr_t pos = (uintptr_t)input;
	int64_t val = 0;
	int x;
	boolean_t neg = B_FALSE;
	char c, cc, ccc;
	uintptr_t end = pos + limit;

	/*
	 * Consume any whitespace preceding digits.
	 */
	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
		pos++;

	/*
	 * Handle an explicit sign if one is present.
	 */
	if (c == '-' || c == '+') {
		if (c == '-')
			neg = B_TRUE;
		c = dtrace_load8(++pos);
	}

	/*
	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
	 * if present.
	 */
	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
		pos += 2;
		c = ccc;
	}

	/*
	 * Read in contiguous digits until the first non-digit character.
	 */
	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
	    c = dtrace_load8(++pos))
		val = val * base + x;

	return (neg ? -val : val);
}
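/*
 * Example behavior (illustrative): dtrace_strtoll(" -0x1a", 16, 8) skips
 * the leading whitespace, records the sign, consumes the "0x" prefix, and
 * returns -26; parsing stops at the first character that is not a digit
 * in the given base, or when the limit is reached.
 */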
/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}
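/*
 * Illustrative overlap case: copying 4 bytes from address A to A + 1 takes
 * the descending (s1 > s2) branch above, so the bytes are copied
 * back-to-front and the result matches memmove() semantics rather than
 * those of a naive forward copy.
 */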
/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
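/*
 * Sketch: a[] holds the low 64 bits in a[0] and the high 64 bits in a[1],
 * so with a = { 0xdeadbeef, 0 }, dtrace_shift_128(a, 32) yields
 * { 0xdeadbeef00000000, 0 }, and dtrace_shift_128(a, -32) shifts it back.
 */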
/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
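/*
 * Worked example (illustrative): dtrace_multiply_128(1ULL << 32,
 * 1ULL << 32, p) decomposes both factors into hi = 1, lo = 0; the only
 * non-zero partial product is hi1 * hi2, so p[0] == 0 and p[1] == 1,
 * i.e. the 128-bit value 2^64.
 */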
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#ifdef illumos
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not changed credentials (e.g. by executing
 * a setuid binary).
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}
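/*
 * Sketch of the failure path shared by the checks above: a consumer whose
 * enabling carries only DTRACE_CRA_PROC will fail dtrace_priv_kernel(),
 * which sets CPU_DTRACE_KPRIV in the CPU's flags; DTRACE_FLAGS2FLT() later
 * maps that flag to the DTRACEFLT_KPRIV fault reported to the consumer.
 */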
/*
 * Determine if the dte_cond of the specified ECB allows for processing of
 * the current probe to continue.  Note that this routine may allow continued
 * processing, but with access(es) stripped from the mstate's dtms_access
 * field.
 */
static int
dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
    dtrace_ecb_t *ecb)
{
	dtrace_probe_t *probe = ecb->dte_probe;
	dtrace_provider_t *prov = probe->dtpr_provider;
	dtrace_pops_t *pops = &prov->dtpv_pops;
	int mode = DTRACE_MODE_NOPRIV_DROP;

	ASSERT(ecb->dte_cond);

#ifdef illumos
	if (pops->dtps_mode != NULL) {
		mode = pops->dtps_mode(prov->dtpv_arg,
		    probe->dtpr_id, probe->dtpr_arg);

		ASSERT((mode & DTRACE_MODE_USER) ||
		    (mode & DTRACE_MODE_KERNEL));
		ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
		    (mode & DTRACE_MODE_NOPRIV_DROP));
	}

	/*
	 * If the dte_cond bits indicate that this consumer is only allowed to
	 * see user-mode firings of this probe, call the provider's dtps_mode()
	 * entry point to check that the probe was fired while in a user
	 * context.  If that's not the case, use the policy specified by the
	 * provider to determine if we drop the probe or merely restrict
	 * operation.
	 */
	if (ecb->dte_cond & DTRACE_COND_USERMODE) {
		ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);

		if (!(mode & DTRACE_MODE_USER)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
		}
	}
#endif

	/*
	 * This is more subtle than it looks.  We have to be absolutely certain
	 * that CRED() isn't going to change out from under us so it's only
	 * legit to examine that structure if we're in constrained situations.
	 * Currently, the only time we'll do this check is if a non-super-user
	 * has enabled the profile or syscall providers -- providers that
	 * allow visibility of all processes.  For the profile case, the check
	 * above will ensure that we're examining a user context.
	 */
	if (ecb->dte_cond & DTRACE_COND_OWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;
		proc_t *proc;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_uid != cr->cr_uid ||
		    s_cr->cr_uid != cr->cr_ruid ||
		    s_cr->cr_uid != cr->cr_suid ||
		    s_cr->cr_gid != cr->cr_gid ||
		    s_cr->cr_gid != cr->cr_rgid ||
		    s_cr->cr_gid != cr->cr_sgid ||
		    (proc = ttoproc(curthread)) == NULL ||
		    (proc->p_flag & SNOCD)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

#ifdef illumos
			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
#endif
		}
	}

#ifdef illumos
	/*
	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
	 * in our zone, check to see if our mode policy is to restrict rather
	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
	 * and DTRACE_ACCESS_ARGS.
	 */
	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &=
			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
		}
	}
#endif

	return (1);
}
/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	dtrace_dynvar_t **rinsep;
	int i, j, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];
		rinsep = &dcpu->dtdsc_rinsing;

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		if (dcpu->dtdsc_rinsing != NULL) {
			/*
			 * If the rinsing list is non-NULL, then it is because
			 * this CPU was selected to accept another CPU's
			 * dirty list -- and since that time, dirty buffers
			 * have accumulated.  This is a highly unlikely
			 * condition, but we choose to ignore the dirty
			 * buffers -- they'll be picked up in a future cleanse.
			 */
			continue;
		}

		if (dcpu->dtdsc_clean != NULL) {
			/*
			 * If the clean list is non-NULL, then we're in a
			 * situation where a CPU has done deallocations (we
			 * have a non-NULL dirty list) but no allocations (we
			 * also have a non-NULL clean list).  We can't simply
			 * move the dirty list into the clean list on this
			 * CPU, yet we also don't want to allow this condition
			 * to persist, lest a short clean list prevent a
			 * massive dirty list from being cleaned (which in
			 * turn could lead to otherwise avoidable dynamic
			 * drops).  To deal with this, we look for some CPU
			 * with a NULL clean list, NULL dirty list, and NULL
			 * rinsing list -- and then we borrow this CPU to
			 * rinse our dirty list.
			 */
			for (j = 0; j < NCPU; j++) {
				dtrace_dstate_percpu_t *rinser;

				rinser = &dstate->dtds_percpu[j];

				if (rinser->dtdsc_rinsing != NULL)
					continue;

				if (rinser->dtdsc_dirty != NULL)
					continue;

				if (rinser->dtdsc_clean != NULL)
					continue;

				rinsep = &rinser->dtdsc_rinsing;
				break;
			}

			if (j == NCPU) {
				/*
				 * We were unable to find another CPU that
				 * could accept this dirty list -- we are
				 * therefore unable to clean it now.
				 */
				dtrace_dynvar_failclean++;
				continue;
			}
		}

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			*rinsep = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;
1815 */ 1816 bucket = hashval % dstate->dtds_hashsize; 1817 1818 if (op == DTRACE_DYNVAR_DEALLOC) { 1819 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1820 1821 for (;;) { 1822 while ((lock = *lockp) & 1) 1823 continue; 1824 1825 if (dtrace_casptr((volatile void *)lockp, 1826 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1827 break; 1828 } 1829 1830 dtrace_membar_producer(); 1831 } 1832 1833 top: 1834 prev = NULL; 1835 lock = hash[bucket].dtdh_lock; 1836 1837 dtrace_membar_consumer(); 1838 1839 start = hash[bucket].dtdh_chain; 1840 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1841 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1842 op != DTRACE_DYNVAR_DEALLOC)); 1843 1844 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1845 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1846 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1847 1848 if (dvar->dtdv_hashval != hashval) { 1849 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1850 /* 1851 * We've reached the sink, and therefore the 1852 * end of the hash chain; we can kick out of 1853 * the loop knowing that we have seen a valid 1854 * snapshot of state. 1855 */ 1856 ASSERT(dvar->dtdv_next == NULL); 1857 ASSERT(dvar == &dtrace_dynhash_sink); 1858 break; 1859 } 1860 1861 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1862 /* 1863 * We've gone off the rails: somewhere along 1864 * the line, one of the members of this hash 1865 * chain was deleted. Note that we could also 1866 * detect this by simply letting this loop run 1867 * to completion, as we would eventually hit 1868 * the end of the dirty list. However, we 1869 * want to avoid running the length of the 1870 * dirty list unnecessarily (it might be quite 1871 * long), so we catch this as early as 1872 * possible by detecting the hash marker. In 1873 * this case, we simply set dvar to NULL and 1874 * break; the conditional after the loop will 1875 * send us back to top. 1876 */ 1877 dvar = NULL; 1878 break; 1879 } 1880 1881 goto next; 1882 } 1883 1884 if (dtuple->dtt_nkeys != nkeys) 1885 goto next; 1886 1887 for (i = 0; i < nkeys; i++, dkey++) { 1888 if (dkey->dttk_size != key[i].dttk_size) 1889 goto next; /* size or type mismatch */ 1890 1891 if (dkey->dttk_size != 0) { 1892 if (dtrace_bcmp( 1893 (void *)(uintptr_t)key[i].dttk_value, 1894 (void *)(uintptr_t)dkey->dttk_value, 1895 dkey->dttk_size)) 1896 goto next; 1897 } else { 1898 if (dkey->dttk_value != key[i].dttk_value) 1899 goto next; 1900 } 1901 } 1902 1903 if (op != DTRACE_DYNVAR_DEALLOC) 1904 return (dvar); 1905 1906 ASSERT(dvar->dtdv_next == NULL || 1907 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1908 1909 if (prev != NULL) { 1910 ASSERT(hash[bucket].dtdh_chain != dvar); 1911 ASSERT(start != dvar); 1912 ASSERT(prev->dtdv_next == dvar); 1913 prev->dtdv_next = dvar->dtdv_next; 1914 } else { 1915 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1916 start, dvar->dtdv_next) != start) { 1917 /* 1918 * We have failed to atomically swing the 1919 * hash table head pointer, presumably because 1920 * of a conflicting allocation on another CPU. 1921 * We need to reread the hash chain and try 1922 * again. 1923 */ 1924 goto top; 1925 } 1926 } 1927 1928 dtrace_membar_producer(); 1929 1930 /* 1931 * Now set the hash value to indicate that it's free. 
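 *
 * The ordering below is significant: the variable is marked free
 * (followed by a producer barrier) before it is pushed onto the
 * dirty list, so that any CPU still traversing a hash chain that
 * contains this dvar will observe DTRACE_DYNHASH_FREE and know to
 * retry its traversal rather than trust the (now stale) chain.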
1932 */ 1933 ASSERT(hash[bucket].dtdh_chain != dvar); 1934 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1935 1936 dtrace_membar_producer(); 1937 1938 /* 1939 * Set the next pointer to point at the dirty list, and 1940 * atomically swing the dirty pointer to the newly freed dvar. 1941 */ 1942 do { 1943 next = dcpu->dtdsc_dirty; 1944 dvar->dtdv_next = next; 1945 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1946 1947 /* 1948 * Finally, unlock this hash bucket. 1949 */ 1950 ASSERT(hash[bucket].dtdh_lock == lock); 1951 ASSERT(lock & 1); 1952 hash[bucket].dtdh_lock++; 1953 1954 return (NULL); 1955 next: 1956 prev = dvar; 1957 continue; 1958 } 1959 1960 if (dvar == NULL) { 1961 /* 1962 * If dvar is NULL, it is because we went off the rails: 1963 * one of the elements that we traversed in the hash chain 1964 * was deleted while we were traversing it. In this case, 1965 * we assert that we aren't doing a dealloc (deallocs lock 1966 * the hash bucket to prevent themselves from racing with 1967 * one another), and retry the hash chain traversal. 1968 */ 1969 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1970 goto top; 1971 } 1972 1973 if (op != DTRACE_DYNVAR_ALLOC) { 1974 /* 1975 * If we are not to allocate a new variable, we want to 1976 * return NULL now. Before we return, check that the value 1977 * of the lock word hasn't changed. If it has, we may have 1978 * seen an inconsistent snapshot. 1979 */ 1980 if (op == DTRACE_DYNVAR_NOALLOC) { 1981 if (hash[bucket].dtdh_lock != lock) 1982 goto top; 1983 } else { 1984 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1985 ASSERT(hash[bucket].dtdh_lock == lock); 1986 ASSERT(lock & 1); 1987 hash[bucket].dtdh_lock++; 1988 } 1989 1990 return (NULL); 1991 } 1992 1993 /* 1994 * We need to allocate a new dynamic variable. The size we need is the 1995 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1996 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1997 * the size of any referred-to data (dsize). We then round the final 1998 * size up to the chunksize for allocation. 1999 */ 2000 for (ksize = 0, i = 0; i < nkeys; i++) 2001 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 2002 2003 /* 2004 * This should be pretty much impossible, but could happen if, say, 2005 * strange DIF specified the tuple. Ideally, this should be an 2006 * assertion and not an error condition -- but that requires that the 2007 * chunksize calculation in dtrace_difo_chunksize() be absolutely 2008 * bullet-proof. (That is, it must not be able to be fooled by 2009 * malicious DIF.) Given the lack of backwards branches in DIF, 2010 * solving this would presumably not amount to solving the Halting 2011 * Problem -- but it still seems awfully hard. 2012 */ 2013 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 2014 ksize + dsize > chunksize) { 2015 dcpu->dtdsc_drops++; 2016 return (NULL); 2017 } 2018 2019 nstate = DTRACE_DSTATE_EMPTY; 2020 2021 do { 2022 retry: 2023 free = dcpu->dtdsc_free; 2024 2025 if (free == NULL) { 2026 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 2027 void *rval; 2028 2029 if (clean == NULL) { 2030 /* 2031 * We're out of dynamic variable space on 2032 * this CPU. Unless we have tried all CPUs, 2033 * we'll try to allocate from a different 2034 * CPU. 
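 *
 * (To illustrate with hypothetical numbers: with NCPU of 8 and me
 * of 3, the walk below visits the free lists of CPUs 4, 5, 6, 7,
 * 0, 1 and 2; nstate records whether any dirty or rinsing list was
 * seen along the way. Only once the walk arrives back at CPU 3 is
 * dtds_state downgraded from DTRACE_DSTATE_CLEAN, and one more lap
 * is then taken so that the matching drop counter is incremented.)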
2035 */ 2036 switch (dstate->dtds_state) { 2037 case DTRACE_DSTATE_CLEAN: { 2038 void *sp = &dstate->dtds_state; 2039 2040 if (++cpu >= NCPU) 2041 cpu = 0; 2042 2043 if (dcpu->dtdsc_dirty != NULL && 2044 nstate == DTRACE_DSTATE_EMPTY) 2045 nstate = DTRACE_DSTATE_DIRTY; 2046 2047 if (dcpu->dtdsc_rinsing != NULL) 2048 nstate = DTRACE_DSTATE_RINSING; 2049 2050 dcpu = &dstate->dtds_percpu[cpu]; 2051 2052 if (cpu != me) 2053 goto retry; 2054 2055 (void) dtrace_cas32(sp, 2056 DTRACE_DSTATE_CLEAN, nstate); 2057 2058 /* 2059 * To increment the correct bean 2060 * counter, take another lap. 2061 */ 2062 goto retry; 2063 } 2064 2065 case DTRACE_DSTATE_DIRTY: 2066 dcpu->dtdsc_dirty_drops++; 2067 break; 2068 2069 case DTRACE_DSTATE_RINSING: 2070 dcpu->dtdsc_rinsing_drops++; 2071 break; 2072 2073 case DTRACE_DSTATE_EMPTY: 2074 dcpu->dtdsc_drops++; 2075 break; 2076 } 2077 2078 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 2079 return (NULL); 2080 } 2081 2082 /* 2083 * The clean list appears to be non-empty. We want to 2084 * move the clean list to the free list; we start by 2085 * moving the clean pointer aside. 2086 */ 2087 if (dtrace_casptr(&dcpu->dtdsc_clean, 2088 clean, NULL) != clean) { 2089 /* 2090 * We are in one of two situations: 2091 * 2092 * (a) The clean list was switched to the 2093 * free list by another CPU. 2094 * 2095 * (b) The clean list was added to by the 2096 * cleansing cyclic. 2097 * 2098 * In either of these situations, we can 2099 * just reattempt the free list allocation. 2100 */ 2101 goto retry; 2102 } 2103 2104 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 2105 2106 /* 2107 * Now we'll move the clean list to our free list. 2108 * It's impossible for this to fail: the only way 2109 * the free list can be updated is through this 2110 * code path, and only one CPU can own the clean list. 2111 * Thus, it would only be possible for this to fail if 2112 * this code were racing with dtrace_dynvar_clean(). 2113 * (That is, if dtrace_dynvar_clean() updated the clean 2114 * list, and we ended up racing to update the free 2115 * list.) This race is prevented by the dtrace_sync() 2116 * in dtrace_dynvar_clean() -- which flushes the 2117 * owners of the clean lists out before resetting 2118 * the clean lists. 2119 */ 2120 dcpu = &dstate->dtds_percpu[me]; 2121 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 2122 ASSERT(rval == NULL); 2123 goto retry; 2124 } 2125 2126 dvar = free; 2127 new_free = dvar->dtdv_next; 2128 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 2129 2130 /* 2131 * We have now allocated a new chunk. We copy the tuple keys into the 2132 * tuple array and copy any referenced key data into the data space 2133 * following the tuple array. As we do this, we relocate dttk_value 2134 * in the final tuple to point to the key data address in the chunk. 
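 *
 * The resulting chunk layout is, schematically:
 *
 *	+------------------------------------------+
 *	| dtrace_dynvar_t (with dtt_key[nkeys])    |
 *	+------------------------------------------+  <- kdata
 *	| by-reference key data (ksize; rounded    |
 *	| up to 8-byte alignment per key)          |
 *	+------------------------------------------+  <- dvar->dtdv_data
 *	| variable data (dsize)                    |
 *	+------------------------------------------+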
2135 */ 2136 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 2137 dvar->dtdv_data = (void *)(kdata + ksize); 2138 dvar->dtdv_tuple.dtt_nkeys = nkeys; 2139 2140 for (i = 0; i < nkeys; i++) { 2141 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 2142 size_t kesize = key[i].dttk_size; 2143 2144 if (kesize != 0) { 2145 dtrace_bcopy( 2146 (const void *)(uintptr_t)key[i].dttk_value, 2147 (void *)kdata, kesize); 2148 dkey->dttk_value = kdata; 2149 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 2150 } else { 2151 dkey->dttk_value = key[i].dttk_value; 2152 } 2153 2154 dkey->dttk_size = kesize; 2155 } 2156 2157 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 2158 dvar->dtdv_hashval = hashval; 2159 dvar->dtdv_next = start; 2160 2161 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 2162 return (dvar); 2163 2164 /* 2165 * The cas has failed. Either another CPU is adding an element to 2166 * this hash chain, or another CPU is deleting an element from this 2167 * hash chain. The simplest way to deal with both of these cases 2168 * (though not necessarily the most efficient) is to free our 2169 * allocated block and re-attempt it all. Note that the free is 2170 * to the dirty list and _not_ to the free list. This is to prevent 2171 * races with allocators, above. 2172 */ 2173 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 2174 2175 dtrace_membar_producer(); 2176 2177 do { 2178 free = dcpu->dtdsc_dirty; 2179 dvar->dtdv_next = free; 2180 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 2181 2182 goto top; 2183 } 2184 2185 /*ARGSUSED*/ 2186 static void 2187 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 2188 { 2189 if ((int64_t)nval < (int64_t)*oval) 2190 *oval = nval; 2191 } 2192 2193 /*ARGSUSED*/ 2194 static void 2195 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 2196 { 2197 if ((int64_t)nval > (int64_t)*oval) 2198 *oval = nval; 2199 } 2200 2201 static void 2202 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 2203 { 2204 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 2205 int64_t val = (int64_t)nval; 2206 2207 if (val < 0) { 2208 for (i = 0; i < zero; i++) { 2209 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 2210 quanta[i] += incr; 2211 return; 2212 } 2213 } 2214 } else { 2215 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 2216 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 2217 quanta[i - 1] += incr; 2218 return; 2219 } 2220 } 2221 2222 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 2223 return; 2224 } 2225 2226 ASSERT(0); 2227 } 2228 2229 static void 2230 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 2231 { 2232 uint64_t arg = *lquanta++; 2233 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 2234 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 2235 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 2236 int32_t val = (int32_t)nval, level; 2237 2238 ASSERT(step != 0); 2239 ASSERT(levels != 0); 2240 2241 if (val < base) { 2242 /* 2243 * This is an underflow. 2244 */ 2245 lquanta[0] += incr; 2246 return; 2247 } 2248 2249 level = (val - base) / step; 2250 2251 if (level < levels) { 2252 lquanta[level + 1] += incr; 2253 return; 2254 } 2255 2256 /* 2257 * This is an overflow. 
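 *
 * (A worked example: for an lquantize() with base 0, step 10 and
 * 10 levels, a value of 37 yields level 3 and is tallied in
 * lquanta[4]; any value below 0 is tallied in the underflow bucket
 * lquanta[0], and any value of 100 or more lands here, in the
 * overflow bucket lquanta[levels + 1].)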
2258 */ 2259 lquanta[levels + 1] += incr; 2260 } 2261 2262 static int 2263 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 2264 uint16_t high, uint16_t nsteps, int64_t value) 2265 { 2266 int64_t this = 1, last, next; 2267 int base = 1, order; 2268 2269 ASSERT(factor <= nsteps); 2270 ASSERT(nsteps % factor == 0); 2271 2272 for (order = 0; order < low; order++) 2273 this *= factor; 2274 2275 /* 2276 * If our value is less than our factor taken to the power of the 2277 * low order of magnitude, it goes into the zeroth bucket. 2278 */ 2279 if (value < (last = this)) 2280 return (0); 2281 2282 for (this *= factor; order <= high; order++) { 2283 int nbuckets = this > nsteps ? nsteps : this; 2284 2285 if ((next = this * factor) < this) { 2286 /* 2287 * We should not generally get log/linear quantizations 2288 * with a high magnitude that allows 64-bits to 2289 * overflow, but we nonetheless protect against this 2290 * by explicitly checking for overflow, and clamping 2291 * our value accordingly. 2292 */ 2293 value = this - 1; 2294 } 2295 2296 if (value < this) { 2297 /* 2298 * If our value lies within this order of magnitude, 2299 * determine its position by taking the offset within 2300 * the order of magnitude, dividing by the bucket 2301 * width, and adding to our (accumulated) base. 2302 */ 2303 return (base + (value - last) / (this / nbuckets)); 2304 } 2305 2306 base += nbuckets - (nbuckets / factor); 2307 last = this; 2308 this = next; 2309 } 2310 2311 /* 2312 * Our value is greater than or equal to our factor taken to the 2313 * power of one plus the high magnitude -- return the top bucket. 2314 */ 2315 return (base); 2316 } 2317 2318 static void 2319 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 2320 { 2321 uint64_t arg = *llquanta++; 2322 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 2323 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 2324 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 2325 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 2326 2327 llquanta[dtrace_aggregate_llquantize_bucket(factor, 2328 low, high, nsteps, nval)] += incr; 2329 } 2330 2331 /*ARGSUSED*/ 2332 static void 2333 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 2334 { 2335 data[0]++; 2336 data[1] += nval; 2337 } 2338 2339 /*ARGSUSED*/ 2340 static void 2341 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 2342 { 2343 int64_t snval = (int64_t)nval; 2344 uint64_t tmp[2]; 2345 2346 data[0]++; 2347 data[1] += nval; 2348 2349 /* 2350 * What we want to say here is: 2351 * 2352 * data[2] += nval * nval; 2353 * 2354 * But given that nval is 64-bit, we could easily overflow, so 2355 * we do this as 128-bit arithmetic. 2356 */ 2357 if (snval < 0) 2358 snval = -snval; 2359 2360 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2361 dtrace_add_128(data + 2, tmp, data + 2); 2362 } 2363 2364 /*ARGSUSED*/ 2365 static void 2366 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2367 { 2368 *oval = *oval + 1; 2369 } 2370 2371 /*ARGSUSED*/ 2372 static void 2373 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2374 { 2375 *oval += nval; 2376 } 2377 2378 /* 2379 * Aggregate given the tuple in the principal data buffer, and the aggregating 2380 * action denoted by the specified dtrace_aggregation_t. The aggregation 2381 * buffer is specified as the buf parameter. 
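 *
 * (The aggregating action itself arrives as agg->dtag_aggregate, a
 * pointer to one of the static routines above; one would expect,
 * for example, dtrace_aggregate_count with an initial value of 0
 * for count(), or dtrace_aggregate_min with an initial value of
 * INT64_MAX for min(). That pairing -- including dtag_initial,
 * applied below when a key is first created -- is established when
 * the aggregation is set up, not here.)
 *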
This routine does not return 2382 * failure; if there is no space in the aggregation buffer, the data will be 2383 * dropped, and a corresponding counter incremented. 2384 */ 2385 static void 2386 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2387 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2388 { 2389 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2390 uint32_t i, ndx, size, fsize; 2391 uint32_t align = sizeof (uint64_t) - 1; 2392 dtrace_aggbuffer_t *agb; 2393 dtrace_aggkey_t *key; 2394 uint32_t hashval = 0, limit, isstr; 2395 caddr_t tomax, data, kdata; 2396 dtrace_actkind_t action; 2397 dtrace_action_t *act; 2398 uintptr_t offs; 2399 2400 if (buf == NULL) 2401 return; 2402 2403 if (!agg->dtag_hasarg) { 2404 /* 2405 * Currently, only quantize() and lquantize() take additional 2406 * arguments, and they have the same semantics: an increment 2407 * value that defaults to 1 when not present. If additional 2408 * aggregating actions take arguments, the setting of the 2409 * default argument value will presumably have to become more 2410 * sophisticated... 2411 */ 2412 arg = 1; 2413 } 2414 2415 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2416 size = rec->dtrd_offset - agg->dtag_base; 2417 fsize = size + rec->dtrd_size; 2418 2419 ASSERT(dbuf->dtb_tomax != NULL); 2420 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2421 2422 if ((tomax = buf->dtb_tomax) == NULL) { 2423 dtrace_buffer_drop(buf); 2424 return; 2425 } 2426 2427 /* 2428 * The metastructure is always at the bottom of the buffer. 2429 */ 2430 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2431 sizeof (dtrace_aggbuffer_t)); 2432 2433 if (buf->dtb_offset == 0) { 2434 /* 2435 * We just kludge up approximately 1/8th of the size to be 2436 * buckets. If this guess ends up being routinely 2437 * off-the-mark, we may need to dynamically readjust this 2438 * based on past performance. 2439 */ 2440 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2441 2442 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2443 (uintptr_t)tomax || hashsize == 0) { 2444 /* 2445 * We've been given a ludicrously small buffer; 2446 * increment our drop count and leave. 2447 */ 2448 dtrace_buffer_drop(buf); 2449 return; 2450 } 2451 2452 /* 2453 * And now, a pathetic attempt to get an odd (or 2454 * perchance, a prime) hash size for better hash distribution. 2455 */ 2456 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2457 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2458 2459 agb->dtagb_hashsize = hashsize; 2460 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2461 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2462 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2463 2464 for (i = 0; i < agb->dtagb_hashsize; i++) 2465 agb->dtagb_hash[i] = NULL; 2466 } 2467 2468 ASSERT(agg->dtag_first != NULL); 2469 ASSERT(agg->dtag_first->dta_intuple); 2470 2471 /* 2472 * Calculate the hash value based on the key. Note that we _don't_ 2473 * include the aggid in the hashing (but we will store it as part of 2474 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2475 * algorithm: a simple, quick algorithm that has no known funnels, and 2476 * gets good distribution in practice. The efficacy of the hashing 2477 * algorithm (and a comparison with other algorithms) may be found by 2478 * running the ::dtrace_aggstat MDB dcmd.
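 *
 * Note that for tuple members that are strings, only the bytes up
 * to and including the terminating NUL participate in the hash
 * (note the isstr test below); the residue of the record beyond
 * the NUL is deliberately not examined.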
2479 */ 2480 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2481 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2482 limit = i + act->dta_rec.dtrd_size; 2483 ASSERT(limit <= size); 2484 isstr = DTRACEACT_ISSTRING(act); 2485 2486 for (; i < limit; i++) { 2487 hashval += data[i]; 2488 hashval += (hashval << 10); 2489 hashval ^= (hashval >> 6); 2490 2491 if (isstr && data[i] == '\0') 2492 break; 2493 } 2494 } 2495 2496 hashval += (hashval << 3); 2497 hashval ^= (hashval >> 11); 2498 hashval += (hashval << 15); 2499 2500 /* 2501 * Yes, the divide here is expensive -- but it's generally the least 2502 * of the performance issues given the amount of data that we iterate 2503 * over to compute hash values, compare data, etc. 2504 */ 2505 ndx = hashval % agb->dtagb_hashsize; 2506 2507 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2508 ASSERT((caddr_t)key >= tomax); 2509 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2510 2511 if (hashval != key->dtak_hashval || key->dtak_size != size) 2512 continue; 2513 2514 kdata = key->dtak_data; 2515 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2516 2517 for (act = agg->dtag_first; act->dta_intuple; 2518 act = act->dta_next) { 2519 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2520 limit = i + act->dta_rec.dtrd_size; 2521 ASSERT(limit <= size); 2522 isstr = DTRACEACT_ISSTRING(act); 2523 2524 for (; i < limit; i++) { 2525 if (kdata[i] != data[i]) 2526 goto next; 2527 2528 if (isstr && data[i] == '\0') 2529 break; 2530 } 2531 } 2532 2533 if (action != key->dtak_action) { 2534 /* 2535 * We are aggregating on the same value in the same 2536 * aggregation with two different aggregating actions. 2537 * (This should have been picked up in the compiler, 2538 * so we may be dealing with errant or devious DIF.) 2539 * This is an error condition; we indicate as much, 2540 * and return. 2541 */ 2542 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2543 return; 2544 } 2545 2546 /* 2547 * This is a hit: we need to apply the aggregator to 2548 * the value at this key. 2549 */ 2550 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2551 return; 2552 next: 2553 continue; 2554 } 2555 2556 /* 2557 * We didn't find it. We need to allocate some zero-filled space, 2558 * link it into the hash table appropriately, and apply the aggregator 2559 * to the (zero-filled) value. 2560 */ 2561 offs = buf->dtb_offset; 2562 while (offs & (align - 1)) 2563 offs += sizeof (uint32_t); 2564 2565 /* 2566 * If we don't have enough room to both allocate a new key _and_ 2567 * its associated data, increment the drop count and return. 2568 */ 2569 if ((uintptr_t)tomax + offs + fsize > 2570 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2571 dtrace_buffer_drop(buf); 2572 return; 2573 } 2574 2575 /*CONSTCOND*/ 2576 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2577 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2578 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2579 2580 key->dtak_data = kdata = tomax + offs; 2581 buf->dtb_offset = offs + fsize; 2582 2583 /* 2584 * Now copy the data across. 2585 */ 2586 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2587 2588 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2589 kdata[i] = data[i]; 2590 2591 /* 2592 * Because strings are not zeroed out by default, we need to iterate 2593 * looking for actions that store strings, and we need to explicitly 2594 * pad these strings out with zeroes. 
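 *
 * (For example, a 16-byte string record holding "foo" might arrive
 * as "foo", a NUL, and then twelve bytes of stale buffer contents;
 * after this loop the twelve trailing bytes are all zero, so the
 * stored key data is fully deterministic.)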
2595 */ 2596 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2597 int nul; 2598 2599 if (!DTRACEACT_ISSTRING(act)) 2600 continue; 2601 2602 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2603 limit = i + act->dta_rec.dtrd_size; 2604 ASSERT(limit <= size); 2605 2606 for (nul = 0; i < limit; i++) { 2607 if (nul) { 2608 kdata[i] = '\0'; 2609 continue; 2610 } 2611 2612 if (data[i] != '\0') 2613 continue; 2614 2615 nul = 1; 2616 } 2617 } 2618 2619 for (i = size; i < fsize; i++) 2620 kdata[i] = 0; 2621 2622 key->dtak_hashval = hashval; 2623 key->dtak_size = size; 2624 key->dtak_action = action; 2625 key->dtak_next = agb->dtagb_hash[ndx]; 2626 agb->dtagb_hash[ndx] = key; 2627 2628 /* 2629 * Finally, apply the aggregator. 2630 */ 2631 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2632 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2633 } 2634 2635 /* 2636 * Given consumer state, this routine finds a speculation in the INACTIVE 2637 * state and transitions it into the ACTIVE state. If there is no speculation 2638 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2639 * incremented -- it is up to the caller to take appropriate action. 2640 */ 2641 static int 2642 dtrace_speculation(dtrace_state_t *state) 2643 { 2644 int i = 0; 2645 dtrace_speculation_state_t current; 2646 uint32_t *stat = &state->dts_speculations_unavail, count; 2647 2648 while (i < state->dts_nspeculations) { 2649 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2650 2651 current = spec->dtsp_state; 2652 2653 if (current != DTRACESPEC_INACTIVE) { 2654 if (current == DTRACESPEC_COMMITTINGMANY || 2655 current == DTRACESPEC_COMMITTING || 2656 current == DTRACESPEC_DISCARDING) 2657 stat = &state->dts_speculations_busy; 2658 i++; 2659 continue; 2660 } 2661 2662 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2663 current, DTRACESPEC_ACTIVE) == current) 2664 return (i + 1); 2665 } 2666 2667 /* 2668 * We couldn't find a speculation. If we found as much as a single 2669 * busy speculation buffer, we'll attribute this failure as "busy" 2670 * instead of "unavail". 2671 */ 2672 do { 2673 count = *stat; 2674 } while (dtrace_cas32(stat, count, count + 1) != count); 2675 2676 return (0); 2677 } 2678 2679 /* 2680 * This routine commits an active speculation. If the specified speculation 2681 * is not in a valid state to perform a commit(), this routine will silently do 2682 * nothing. 
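 *
 * (The which parameter is the one-based identifier that
 * dtrace_speculation() handed to the consumer: zero is the
 * distinguished "no speculation" value, which is why it is
 * silently ignored below, and a value beyond dts_nspeculations
 * denotes errant DIF and raises CPU_DTRACE_ILLOP.)
 *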
The state of the specified speculation is transitioned according 2683 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2684 */ 2685 static void 2686 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2687 dtrace_specid_t which) 2688 { 2689 dtrace_speculation_t *spec; 2690 dtrace_buffer_t *src, *dest; 2691 uintptr_t daddr, saddr, dlimit, slimit; 2692 dtrace_speculation_state_t current, new = 0; 2693 intptr_t offs; 2694 uint64_t timestamp; 2695 2696 if (which == 0) 2697 return; 2698 2699 if (which > state->dts_nspeculations) { 2700 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2701 return; 2702 } 2703 2704 spec = &state->dts_speculations[which - 1]; 2705 src = &spec->dtsp_buffer[cpu]; 2706 dest = &state->dts_buffer[cpu]; 2707 2708 do { 2709 current = spec->dtsp_state; 2710 2711 if (current == DTRACESPEC_COMMITTINGMANY) 2712 break; 2713 2714 switch (current) { 2715 case DTRACESPEC_INACTIVE: 2716 case DTRACESPEC_DISCARDING: 2717 return; 2718 2719 case DTRACESPEC_COMMITTING: 2720 /* 2721 * This is only possible if we are (a) commit()'ing 2722 * without having done a prior speculate() on this CPU 2723 * and (b) racing with another commit() on a different 2724 * CPU. There's nothing to do -- we just assert that 2725 * our offset is 0. 2726 */ 2727 ASSERT(src->dtb_offset == 0); 2728 return; 2729 2730 case DTRACESPEC_ACTIVE: 2731 new = DTRACESPEC_COMMITTING; 2732 break; 2733 2734 case DTRACESPEC_ACTIVEONE: 2735 /* 2736 * This speculation is active on one CPU. If our 2737 * buffer offset is non-zero, we know that the one CPU 2738 * must be us. Otherwise, we are committing on a 2739 * different CPU from the speculate(), and we must 2740 * rely on being asynchronously cleaned. 2741 */ 2742 if (src->dtb_offset != 0) { 2743 new = DTRACESPEC_COMMITTING; 2744 break; 2745 } 2746 /*FALLTHROUGH*/ 2747 2748 case DTRACESPEC_ACTIVEMANY: 2749 new = DTRACESPEC_COMMITTINGMANY; 2750 break; 2751 2752 default: 2753 ASSERT(0); 2754 } 2755 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2756 current, new) != current); 2757 2758 /* 2759 * We have set the state to indicate that we are committing this 2760 * speculation. Now reserve the necessary space in the destination 2761 * buffer. 2762 */ 2763 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2764 sizeof (uint64_t), state, NULL)) < 0) { 2765 dtrace_buffer_drop(dest); 2766 goto out; 2767 } 2768 2769 /* 2770 * We have sufficient space to copy the speculative buffer into the 2771 * primary buffer. First, modify the speculative buffer, filling 2772 * in the timestamp of all entries with the current time. The data 2773 * must have the commit() time rather than the time it was traced, 2774 * so that all entries in the primary buffer are in timestamp order. 2775 */ 2776 timestamp = dtrace_gethrtime(); 2777 saddr = (uintptr_t)src->dtb_tomax; 2778 slimit = saddr + src->dtb_offset; 2779 while (saddr < slimit) { 2780 size_t size; 2781 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2782 2783 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2784 saddr += sizeof (dtrace_epid_t); 2785 continue; 2786 } 2787 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2788 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2789 2790 ASSERT3U(saddr + size, <=, slimit); 2791 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2792 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2793 2794 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2795 2796 saddr += size; 2797 } 2798 2799 /* 2800 * Copy the buffer across. 
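 *
 * The copy below proceeds in two phases -- 64-bit words while at
 * least eight bytes remain, then a byte-at-a-time tail -- because
 * the destination reservation is 64-bit aligned but the number of
 * bytes to copy (src->dtb_offset) need not be a multiple of eight.
 *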
(Note that this is a 2801 * highly suboptimal bcopy(); in the unlikely event that this becomes 2802 * a serious performance issue, a high-performance DTrace-specific 2803 * bcopy() should obviously be invented.) 2804 */ 2805 daddr = (uintptr_t)dest->dtb_tomax + offs; 2806 dlimit = daddr + src->dtb_offset; 2807 saddr = (uintptr_t)src->dtb_tomax; 2808 2809 /* 2810 * First, the aligned portion. 2811 */ 2812 while (dlimit - daddr >= sizeof (uint64_t)) { 2813 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2814 2815 daddr += sizeof (uint64_t); 2816 saddr += sizeof (uint64_t); 2817 } 2818 2819 /* 2820 * Now any left-over bit... 2821 */ 2822 while (dlimit - daddr) 2823 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2824 2825 /* 2826 * Finally, commit the reserved space in the destination buffer. 2827 */ 2828 dest->dtb_offset = offs + src->dtb_offset; 2829 2830 out: 2831 /* 2832 * If we're lucky enough to be the only active CPU on this speculation 2833 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2834 */ 2835 if (current == DTRACESPEC_ACTIVE || 2836 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2837 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2838 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2839 2840 ASSERT(rval == DTRACESPEC_COMMITTING); 2841 } 2842 2843 src->dtb_offset = 0; 2844 src->dtb_xamot_drops += src->dtb_drops; 2845 src->dtb_drops = 0; 2846 } 2847 2848 /* 2849 * This routine discards an active speculation. If the specified speculation 2850 * is not in a valid state to perform a discard(), this routine will silently 2851 * do nothing. The state of the specified speculation is transitioned 2852 * according to the state transition diagram outlined in <sys/dtrace_impl.h>. 2853 */ 2854 static void 2855 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2856 dtrace_specid_t which) 2857 { 2858 dtrace_speculation_t *spec; 2859 dtrace_speculation_state_t current, new = 0; 2860 dtrace_buffer_t *buf; 2861 2862 if (which == 0) 2863 return; 2864 2865 if (which > state->dts_nspeculations) { 2866 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2867 return; 2868 } 2869 2870 spec = &state->dts_speculations[which - 1]; 2871 buf = &spec->dtsp_buffer[cpu]; 2872 2873 do { 2874 current = spec->dtsp_state; 2875 2876 switch (current) { 2877 case DTRACESPEC_INACTIVE: 2878 case DTRACESPEC_COMMITTINGMANY: 2879 case DTRACESPEC_COMMITTING: 2880 case DTRACESPEC_DISCARDING: 2881 return; 2882 2883 case DTRACESPEC_ACTIVE: 2884 case DTRACESPEC_ACTIVEMANY: 2885 new = DTRACESPEC_DISCARDING; 2886 break; 2887 2888 case DTRACESPEC_ACTIVEONE: 2889 if (buf->dtb_offset != 0) { 2890 new = DTRACESPEC_INACTIVE; 2891 } else { 2892 new = DTRACESPEC_DISCARDING; 2893 } 2894 break; 2895 2896 default: 2897 ASSERT(0); 2898 } 2899 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2900 current, new) != current); 2901 2902 buf->dtb_offset = 0; 2903 buf->dtb_drops = 0; 2904 } 2905 2906 /* 2907 * Note: not called from probe context. This function is called 2908 * asynchronously from cross call context to clean any speculations that are 2909 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2910 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2911 * speculation.
2912 */ 2913 static void 2914 dtrace_speculation_clean_here(dtrace_state_t *state) 2915 { 2916 dtrace_icookie_t cookie; 2917 processorid_t cpu = curcpu; 2918 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2919 dtrace_specid_t i; 2920 2921 cookie = dtrace_interrupt_disable(); 2922 2923 if (dest->dtb_tomax == NULL) { 2924 dtrace_interrupt_enable(cookie); 2925 return; 2926 } 2927 2928 for (i = 0; i < state->dts_nspeculations; i++) { 2929 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2930 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2931 2932 if (src->dtb_tomax == NULL) 2933 continue; 2934 2935 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2936 src->dtb_offset = 0; 2937 continue; 2938 } 2939 2940 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2941 continue; 2942 2943 if (src->dtb_offset == 0) 2944 continue; 2945 2946 dtrace_speculation_commit(state, cpu, i + 1); 2947 } 2948 2949 dtrace_interrupt_enable(cookie); 2950 } 2951 2952 /* 2953 * Note: not called from probe context. This function is called 2954 * asynchronously (and at a regular interval) to clean any speculations that 2955 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2956 * is work to be done, it cross calls all CPUs to perform that work; 2957 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2958 * INACTIVE state until they have been cleaned by all CPUs. 2959 */ 2960 static void 2961 dtrace_speculation_clean(dtrace_state_t *state) 2962 { 2963 int work = 0, rv; 2964 dtrace_specid_t i; 2965 2966 for (i = 0; i < state->dts_nspeculations; i++) { 2967 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2968 2969 ASSERT(!spec->dtsp_cleaning); 2970 2971 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2972 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2973 continue; 2974 2975 work++; 2976 spec->dtsp_cleaning = 1; 2977 } 2978 2979 if (!work) 2980 return; 2981 2982 dtrace_xcall(DTRACE_CPUALL, 2983 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2984 2985 /* 2986 * We now know that all CPUs have committed or discarded their 2987 * speculation buffers, as appropriate. We can now set the state 2988 * to inactive. 2989 */ 2990 for (i = 0; i < state->dts_nspeculations; i++) { 2991 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2992 dtrace_speculation_state_t current, new; 2993 2994 if (!spec->dtsp_cleaning) 2995 continue; 2996 2997 current = spec->dtsp_state; 2998 ASSERT(current == DTRACESPEC_DISCARDING || 2999 current == DTRACESPEC_COMMITTINGMANY); 3000 3001 new = DTRACESPEC_INACTIVE; 3002 3003 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 3004 ASSERT(rv == current); 3005 spec->dtsp_cleaning = 0; 3006 } 3007 } 3008 3009 /* 3010 * Called as part of a speculate() to get the speculative buffer associated 3011 * with a given speculation. Returns NULL if the specified speculation is not 3012 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 3013 * the active CPU is not the specified CPU -- the speculation will be 3014 * atomically transitioned into the ACTIVEMANY state.
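 *
 * As a sketch of the expected calling pattern (illustrative only;
 * the actual caller is the speculation machinery in probe context):
 *
 *	buf = dtrace_speculation_buffer(state, cpuid, which);
 *	if (buf == NULL) {
 *		(speculation is dead or unknown -- drop the data)
 *	}
 *	(otherwise, record into buf instead of the principal buffer)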
3015 */ 3016 static dtrace_buffer_t * 3017 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 3018 dtrace_specid_t which) 3019 { 3020 dtrace_speculation_t *spec; 3021 dtrace_speculation_state_t current, new = 0; 3022 dtrace_buffer_t *buf; 3023 3024 if (which == 0) 3025 return (NULL); 3026 3027 if (which > state->dts_nspeculations) { 3028 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 3029 return (NULL); 3030 } 3031 3032 spec = &state->dts_speculations[which - 1]; 3033 buf = &spec->dtsp_buffer[cpuid]; 3034 3035 do { 3036 current = spec->dtsp_state; 3037 3038 switch (current) { 3039 case DTRACESPEC_INACTIVE: 3040 case DTRACESPEC_COMMITTINGMANY: 3041 case DTRACESPEC_DISCARDING: 3042 return (NULL); 3043 3044 case DTRACESPEC_COMMITTING: 3045 ASSERT(buf->dtb_offset == 0); 3046 return (NULL); 3047 3048 case DTRACESPEC_ACTIVEONE: 3049 /* 3050 * This speculation is currently active on one CPU. 3051 * Check the offset in the buffer; if it's non-zero, 3052 * that CPU must be us (and we leave the state alone). 3053 * If it's zero, assume that we're starting on a new 3054 * CPU -- and change the state to indicate that the 3055 * speculation is active on more than one CPU. 3056 */ 3057 if (buf->dtb_offset != 0) 3058 return (buf); 3059 3060 new = DTRACESPEC_ACTIVEMANY; 3061 break; 3062 3063 case DTRACESPEC_ACTIVEMANY: 3064 return (buf); 3065 3066 case DTRACESPEC_ACTIVE: 3067 new = DTRACESPEC_ACTIVEONE; 3068 break; 3069 3070 default: 3071 ASSERT(0); 3072 } 3073 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 3074 current, new) != current); 3075 3076 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 3077 return (buf); 3078 } 3079 3080 /* 3081 * Return a string. In the event that the user lacks the privilege to access 3082 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3083 * don't fail access checking. 3084 * 3085 * dtrace_dif_variable() uses this routine as a helper for various 3086 * builtin values such as 'execname' and 'probefunc.' 3087 */ 3088 uintptr_t 3089 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 3090 dtrace_mstate_t *mstate) 3091 { 3092 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3093 uintptr_t ret; 3094 size_t strsz; 3095 3096 /* 3097 * The easy case: this probe is allowed to read all of memory, so 3098 * we can just return this as a vanilla pointer. 3099 */ 3100 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 3101 return (addr); 3102 3103 /* 3104 * This is the tougher case: we copy the string in question from 3105 * kernel memory into scratch memory and return it that way: this 3106 * ensures that we won't trip up when access checking tests the 3107 * BYREF return value. 3108 */ 3109 strsz = dtrace_strlen((char *)addr, size) + 1; 3110 3111 if (mstate->dtms_scratch_ptr + strsz > 3112 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3113 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3114 return (0); 3115 } 3116 3117 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3118 strsz); 3119 ret = mstate->dtms_scratch_ptr; 3120 mstate->dtms_scratch_ptr += strsz; 3121 return (ret); 3122 } 3123 3124 /* 3125 * Return a string from a memory address which is known to have one or 3126 * more concatenated, individually zero-terminated sub-strings. 3127 * In the event that the user lacks the privilege to access 3128 * arbitrary kernel memory, we copy the string out to scratch memory so that we 3129 * don't fail access checking.
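 *
 * For example (hypothetical contents): an argument vector stored
 * as the packed string "ls\0-l\0/tmp\0" with strsz of 11 is
 * returned as the single string "ls -l /tmp" -- each interior NUL
 * becomes a space, and only the final terminator is preserved.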
3130 * 3131 * dtrace_dif_variable() uses this routine as a helper for various 3132 * builtin values such as 'execargs'. 3133 */ 3134 static uintptr_t 3135 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 3136 dtrace_mstate_t *mstate) 3137 { 3138 char *p; 3139 size_t i; 3140 uintptr_t ret; 3141 3142 if (mstate->dtms_scratch_ptr + strsz > 3143 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3144 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3145 return (0); 3146 } 3147 3148 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 3149 strsz); 3150 3151 /* Replace sub-string termination characters with a space. */ 3152 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 3153 p++, i++) 3154 if (*p == '\0') 3155 *p = ' '; 3156 3157 ret = mstate->dtms_scratch_ptr; 3158 mstate->dtms_scratch_ptr += strsz; 3159 return (ret); 3160 } 3161 3162 /* 3163 * This function implements the DIF emulator's variable lookups. The emulator 3164 * passes a reserved variable identifier and optional built-in array index. 3165 */ 3166 static uint64_t 3167 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 3168 uint64_t ndx) 3169 { 3170 /* 3171 * If we're accessing one of the uncached arguments, we'll turn this 3172 * into a reference in the args array. 3173 */ 3174 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 3175 ndx = v - DIF_VAR_ARG0; 3176 v = DIF_VAR_ARGS; 3177 } 3178 3179 switch (v) { 3180 case DIF_VAR_ARGS: 3181 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 3182 if (ndx >= sizeof (mstate->dtms_arg) / 3183 sizeof (mstate->dtms_arg[0])) { 3184 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3185 dtrace_provider_t *pv; 3186 uint64_t val; 3187 3188 pv = mstate->dtms_probe->dtpr_provider; 3189 if (pv->dtpv_pops.dtps_getargval != NULL) 3190 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 3191 mstate->dtms_probe->dtpr_id, 3192 mstate->dtms_probe->dtpr_arg, ndx, aframes); 3193 else 3194 val = dtrace_getarg(ndx, aframes); 3195 3196 /* 3197 * This is regrettably required to keep the compiler 3198 * from tail-optimizing the call to dtrace_getarg(). 3199 * The condition always evaluates to true, but the 3200 * compiler has no way of figuring that out a priori. 3201 * (None of this would be necessary if the compiler 3202 * could be relied upon to _always_ tail-optimize 3203 * the call to dtrace_getarg() -- but it can't.) 
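 * (The concern: were the call tail-optimized, this function's stack
 * frame would be collapsed, and the artificial-frame count implied
 * by aframes -- which assumes dtrace_getarg() is invoked from
 * within dtrace_dif_variable() -- would be off by one.)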
3204 */ 3205 if (mstate->dtms_probe != NULL) 3206 return (val); 3207 3208 ASSERT(0); 3209 } 3210 3211 return (mstate->dtms_arg[ndx]); 3212 3213 #ifdef illumos 3214 case DIF_VAR_UREGS: { 3215 klwp_t *lwp; 3216 3217 if (!dtrace_priv_proc(state)) 3218 return (0); 3219 3220 if ((lwp = curthread->t_lwp) == NULL) { 3221 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3222 cpu_core[curcpu].cpuc_dtrace_illval = NULL; 3223 return (0); 3224 } 3225 3226 return (dtrace_getreg(lwp->lwp_regs, ndx)); 3227 3228 } 3229 #else 3230 case DIF_VAR_UREGS: { 3231 struct trapframe *tframe; 3232 3233 if (!dtrace_priv_proc(state)) 3234 return (0); 3235 3236 if ((tframe = curthread->td_frame) == NULL) { 3237 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 3238 cpu_core[curcpu].cpuc_dtrace_illval = 0; 3239 return (0); 3240 } 3241 3242 return (dtrace_getreg(tframe, ndx)); 3243 } 3244 #endif 3245 3246 case DIF_VAR_CURTHREAD: 3247 if (!dtrace_priv_proc(state)) 3248 return (0); 3249 return ((uint64_t)(uintptr_t)curthread); 3250 3251 case DIF_VAR_TIMESTAMP: 3252 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 3253 mstate->dtms_timestamp = dtrace_gethrtime(); 3254 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 3255 } 3256 return (mstate->dtms_timestamp); 3257 3258 case DIF_VAR_VTIMESTAMP: 3259 ASSERT(dtrace_vtime_references != 0); 3260 return (curthread->t_dtrace_vtime); 3261 3262 case DIF_VAR_WALLTIMESTAMP: 3263 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 3264 mstate->dtms_walltimestamp = dtrace_gethrestime(); 3265 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 3266 } 3267 return (mstate->dtms_walltimestamp); 3268 3269 #ifdef illumos 3270 case DIF_VAR_IPL: 3271 if (!dtrace_priv_kernel(state)) 3272 return (0); 3273 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 3274 mstate->dtms_ipl = dtrace_getipl(); 3275 mstate->dtms_present |= DTRACE_MSTATE_IPL; 3276 } 3277 return (mstate->dtms_ipl); 3278 #endif 3279 3280 case DIF_VAR_EPID: 3281 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 3282 return (mstate->dtms_epid); 3283 3284 case DIF_VAR_ID: 3285 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3286 return (mstate->dtms_probe->dtpr_id); 3287 3288 case DIF_VAR_STACKDEPTH: 3289 if (!dtrace_priv_kernel(state)) 3290 return (0); 3291 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 3292 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3293 3294 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 3295 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 3296 } 3297 return (mstate->dtms_stackdepth); 3298 3299 case DIF_VAR_USTACKDEPTH: 3300 if (!dtrace_priv_proc(state)) 3301 return (0); 3302 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 3303 /* 3304 * See comment in DIF_VAR_PID.
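 * (The gist as it applies here: a probe firing in high-level
 * interrupt context has no meaningful user context to examine,
 * so the user stack depth is simply reported as zero.)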
3305 */ 3306 if (DTRACE_ANCHORED(mstate->dtms_probe) && 3307 CPU_ON_INTR(CPU)) { 3308 mstate->dtms_ustackdepth = 0; 3309 } else { 3310 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3311 mstate->dtms_ustackdepth = 3312 dtrace_getustackdepth(); 3313 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3314 } 3315 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 3316 } 3317 return (mstate->dtms_ustackdepth); 3318 3319 case DIF_VAR_CALLER: 3320 if (!dtrace_priv_kernel(state)) 3321 return (0); 3322 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 3323 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 3324 3325 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 3326 /* 3327 * If this is an unanchored probe, we are 3328 * required to go through the slow path: 3329 * dtrace_caller() only guarantees correct 3330 * results for anchored probes. 3331 */ 3332 pc_t caller[2] = {0, 0}; 3333 3334 dtrace_getpcstack(caller, 2, aframes, 3335 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 3336 mstate->dtms_caller = caller[1]; 3337 } else if ((mstate->dtms_caller = 3338 dtrace_caller(aframes)) == -1) { 3339 /* 3340 * We have failed to do this the quick way; 3341 * we must resort to the slower approach of 3342 * calling dtrace_getpcstack(). 3343 */ 3344 pc_t caller = 0; 3345 3346 dtrace_getpcstack(&caller, 1, aframes, NULL); 3347 mstate->dtms_caller = caller; 3348 } 3349 3350 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3351 } 3352 return (mstate->dtms_caller); 3353 3354 case DIF_VAR_UCALLER: 3355 if (!dtrace_priv_proc(state)) 3356 return (0); 3357 3358 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3359 uint64_t ustack[3]; 3360 3361 /* 3362 * dtrace_getupcstack() fills in the first uint64_t 3363 * with the current PID. The second uint64_t will 3364 * be the program counter at user-level. The third 3365 * uint64_t will contain the caller, which is what 3366 * we're after. 3367 */ 3368 ustack[2] = 0; 3369 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3370 dtrace_getupcstack(ustack, 3); 3371 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3372 mstate->dtms_ucaller = ustack[2]; 3373 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3374 } 3375 3376 return (mstate->dtms_ucaller); 3377 3378 case DIF_VAR_PROBEPROV: 3379 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3380 return (dtrace_dif_varstr( 3381 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3382 state, mstate)); 3383 3384 case DIF_VAR_PROBEMOD: 3385 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3386 return (dtrace_dif_varstr( 3387 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3388 state, mstate)); 3389 3390 case DIF_VAR_PROBEFUNC: 3391 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3392 return (dtrace_dif_varstr( 3393 (uintptr_t)mstate->dtms_probe->dtpr_func, 3394 state, mstate)); 3395 3396 case DIF_VAR_PROBENAME: 3397 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3398 return (dtrace_dif_varstr( 3399 (uintptr_t)mstate->dtms_probe->dtpr_name, 3400 state, mstate)); 3401 3402 case DIF_VAR_PID: 3403 if (!dtrace_priv_proc(state)) 3404 return (0); 3405 3406 #ifdef illumos 3407 /* 3408 * Note that we are assuming that an unanchored probe is 3409 * always due to a high-level interrupt. (And we're assuming 3410 * that there is only a single high level interrupt.) 3411 */ 3412 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3413 return (pid0.pid_id); 3414 3415 /* 3416 * It is always safe to dereference one's own t_procp pointer: 3417 * it always points to a valid, allocated proc structure. 
3418 * Further, it is always safe to dereference the p_pidp member 3419 * of one's own proc structure. (These are truisms because 3420 * threads and processes don't clean up their own state -- 3421 * they leave that task to whomever reaps them.) 3422 */ 3423 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3424 #else 3425 return ((uint64_t)curproc->p_pid); 3426 #endif 3427 3428 case DIF_VAR_PPID: 3429 if (!dtrace_priv_proc(state)) 3430 return (0); 3431 3432 #ifdef illumos 3433 /* 3434 * See comment in DIF_VAR_PID. 3435 */ 3436 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3437 return (pid0.pid_id); 3438 3439 /* 3440 * It is always safe to dereference one's own t_procp pointer: 3441 * it always points to a valid, allocated proc structure. 3442 * (This is true because threads don't clean up their own 3443 * state -- they leave that task to whomever reaps them.) 3444 */ 3445 return ((uint64_t)curthread->t_procp->p_ppid); 3446 #else 3447 if (curproc->p_pid == proc0.p_pid) 3448 return (curproc->p_pid); 3449 else 3450 return (curproc->p_pptr->p_pid); 3451 #endif 3452 3453 case DIF_VAR_TID: 3454 #ifdef illumos 3455 /* 3456 * See comment in DIF_VAR_PID. 3457 */ 3458 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3459 return (0); 3460 #endif 3461 3462 return ((uint64_t)curthread->t_tid); 3463 3464 case DIF_VAR_EXECARGS: { 3465 struct pargs *p_args = curthread->td_proc->p_args; 3466 3467 if (p_args == NULL) 3468 return (0); 3469 3470 return (dtrace_dif_varstrz( 3471 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3472 } 3473 3474 case DIF_VAR_EXECNAME: 3475 #ifdef illumos 3476 if (!dtrace_priv_proc(state)) 3477 return (0); 3478 3479 /* 3480 * See comment in DIF_VAR_PID. 3481 */ 3482 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3483 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3484 3485 /* 3486 * It is always safe to dereference one's own t_procp pointer: 3487 * it always points to a valid, allocated proc structure. 3488 * (This is true because threads don't clean up their own 3489 * state -- they leave that task to whomever reaps them.) 3490 */ 3491 return (dtrace_dif_varstr( 3492 (uintptr_t)curthread->t_procp->p_user.u_comm, 3493 state, mstate)); 3494 #else 3495 return (dtrace_dif_varstr( 3496 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3497 #endif 3498 3499 case DIF_VAR_ZONENAME: 3500 #ifdef illumos 3501 if (!dtrace_priv_proc(state)) 3502 return (0); 3503 3504 /* 3505 * See comment in DIF_VAR_PID. 3506 */ 3507 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3508 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3509 3510 /* 3511 * It is always safe to dereference one's own t_procp pointer: 3512 * it always points to a valid, allocated proc structure. 3513 * (This is true because threads don't clean up their own 3514 * state -- they leave that task to whomever reaps them.) 3515 */ 3516 return (dtrace_dif_varstr( 3517 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3518 state, mstate)); 3519 #else 3520 return (0); 3521 #endif 3522 3523 case DIF_VAR_UID: 3524 if (!dtrace_priv_proc(state)) 3525 return (0); 3526 3527 #ifdef illumos 3528 /* 3529 * See comment in DIF_VAR_PID. 3530 */ 3531 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3532 return ((uint64_t)p0.p_cred->cr_uid); 3533 3534 /* 3535 * It is always safe to dereference one's own t_procp pointer: 3536 * it always points to a valid, allocated proc structure.
3537 * (This is true because threads don't clean up their own 3538 * state -- they leave that task to whomever reaps them.) 3539 * 3540 * Additionally, it is safe to dereference one's own process 3541 * credential, since this is never NULL after process birth. 3542 */ 3543 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3544 #else 3545 return ((uint64_t)curthread->td_ucred->cr_uid); 3546 #endif 3547 3548 case DIF_VAR_GID: 3549 if (!dtrace_priv_proc(state)) 3550 return (0); 3551 3552 #ifdef illumos 3553 /* 3554 * See comment in DIF_VAR_PID. 3555 */ 3556 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3557 return ((uint64_t)p0.p_cred->cr_gid); 3558 3559 /* 3560 * It is always safe to dereference one's own t_procp pointer: 3561 * it always points to a valid, allocated proc structure. 3562 * (This is true because threads don't clean up their own 3563 * state -- they leave that task to whomever reaps them.) 3564 * 3565 * Additionally, it is safe to dereference one's own process 3566 * credential, since this is never NULL after process birth. 3567 */ 3568 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3569 #else 3570 return ((uint64_t)curthread->td_ucred->cr_gid); 3571 #endif 3572 3573 case DIF_VAR_ERRNO: { 3574 #ifdef illumos 3575 klwp_t *lwp; 3576 if (!dtrace_priv_proc(state)) 3577 return (0); 3578 3579 /* 3580 * See comment in DIF_VAR_PID. 3581 */ 3582 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3583 return (0); 3584 3585 /* 3586 * It is always safe to dereference one's own t_lwp pointer in 3587 * the event that this pointer is non-NULL. (This is true 3588 * because threads and lwps don't clean up their own state -- 3589 * they leave that task to whomever reaps them.) 3590 */ 3591 if ((lwp = curthread->t_lwp) == NULL) 3592 return (0); 3593 3594 return ((uint64_t)lwp->lwp_errno); 3595 #else 3596 return (curthread->td_errno); 3597 #endif 3598 } 3599 #ifndef illumos 3600 case DIF_VAR_CPU: { 3601 return curcpu; 3602 } 3603 #endif 3604 default: 3605 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3606 return (0); 3607 } 3608 } 3609 3610 3611 typedef enum dtrace_json_state { 3612 DTRACE_JSON_REST = 1, 3613 DTRACE_JSON_OBJECT, 3614 DTRACE_JSON_STRING, 3615 DTRACE_JSON_STRING_ESCAPE, 3616 DTRACE_JSON_STRING_ESCAPE_UNICODE, 3617 DTRACE_JSON_COLON, 3618 DTRACE_JSON_COMMA, 3619 DTRACE_JSON_VALUE, 3620 DTRACE_JSON_IDENTIFIER, 3621 DTRACE_JSON_NUMBER, 3622 DTRACE_JSON_NUMBER_FRAC, 3623 DTRACE_JSON_NUMBER_EXP, 3624 DTRACE_JSON_COLLECT_OBJECT 3625 } dtrace_json_state_t; 3626 3627 /* 3628 * This function possesses just enough knowledge about JSON to extract a single 3629 * value from a JSON string and store it in the scratch buffer. It is able 3630 * to extract nested object values, and members of arrays by index. 3631 * 3632 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to 3633 * be looked up as we descend into the object tree. e.g. 3634 * 3635 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL 3636 * with nelems = 5. 3637 * 3638 * The run time of this function must be bounded above by strsize to limit the 3639 * amount of work done in probe context. As such, it is implemented as a 3640 * simple state machine, reading one character at a time using safe loads 3641 * until we find the requested element, hit a parsing error or run off the 3642 * end of the object or string. 
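 *
 * A worked example (hypothetical input): extracting "foo[1]" --
 * elemlist "foo" NUL "1" NUL, nelems = 2 -- from the JSON string
 *
 *	{"foo": [10, 20]}
 *
 * proceeds REST -> OBJECT -> STRING (key "foo" matches; found) ->
 * COLON -> VALUE (an array; descend, selecting index 1) -> NUMBER
 * ("10") -> COMMA (array position now matches) -> NUMBER ("20"),
 * at which point "20" is copied into dest and returned.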
3643 * 3644 * As there is no way for a subroutine to return an error without interrupting 3645 * clause execution, we simply return NULL in the event of a missing key or any 3646 * other error condition. Each NULL return in this function is commented with 3647 * the error condition it represents -- parsing or otherwise. 3648 * 3649 * The set of states for the state machine closely matches the JSON 3650 * specification (http://json.org/). Briefly: 3651 * 3652 * DTRACE_JSON_REST: 3653 * Skip whitespace until we find either a top-level Object, moving 3654 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. 3655 * 3656 * DTRACE_JSON_OBJECT: 3657 * Locate the next key String in an Object. Sets a flag to denote 3658 * the next String as a key string and moves to DTRACE_JSON_STRING. 3659 * 3660 * DTRACE_JSON_COLON: 3661 * Skip whitespace until we find the colon that separates key Strings 3662 * from their values. Once found, move to DTRACE_JSON_VALUE. 3663 * 3664 * DTRACE_JSON_VALUE: 3665 * Detects the type of the next value (String, Number, Identifier, Object 3666 * or Array) and routes to the states that process that type. Here we also 3667 * deal with the element selector list if we are requested to traverse down 3668 * into the object tree. 3669 * 3670 * DTRACE_JSON_COMMA: 3671 * Skip whitespace until we find the comma that separates key-value pairs 3672 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays 3673 * (similarly DTRACE_JSON_VALUE). All following literal value processing 3674 * states return to this state at the end of their value, unless otherwise 3675 * noted. 3676 * 3677 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: 3678 * Processes a Number literal from the JSON, including any exponent 3679 * component that may be present. Numbers are returned as strings, which 3680 * may be passed to strtoll() if an integer is required. 3681 * 3682 * DTRACE_JSON_IDENTIFIER: 3683 * Processes a "true", "false" or "null" literal in the JSON. 3684 * 3685 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, 3686 * DTRACE_JSON_STRING_ESCAPE_UNICODE: 3687 * Processes a String literal from the JSON, whether the String denotes 3688 * a key, a value or part of a larger Object. Handles all escape sequences 3689 * present in the specification, including four-digit unicode characters, 3690 * but merely includes the escape sequence without converting it to the 3691 * actual escaped character. If the String is flagged as a key, we 3692 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. 3693 * 3694 * DTRACE_JSON_COLLECT_OBJECT: 3695 * This state collects an entire Object (or Array), correctly handling 3696 * embedded strings. If the full element selector list matches this nested 3697 * object, we return the Object in full as a string. If not, we use this 3698 * state to skip to the next value at this level and continue processing. 3699 * 3700 * NOTE: This function uses various macros from strtolctype.h to manipulate 3701 * digit values, etc -- these have all been checked to ensure they make 3702 * no additional function calls. 
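 *
 * An invocation for the "foo[1]" example above might thus look
 * like (illustrative only):
 *
 *	char elemlist[] = "foo\0001";
 *
 *	result = dtrace_json(size, json, elemlist, 2, dest);
 *
 * where json is an address safe to read via dtrace_load8(), dest
 * points at scratch space of at least size bytes, and result is
 * either dest or NULL.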
3703 */ 3704 static char * 3705 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, 3706 char *dest) 3707 { 3708 dtrace_json_state_t state = DTRACE_JSON_REST; 3709 int64_t array_elem = INT64_MIN; 3710 int64_t array_pos = 0; 3711 uint8_t escape_unicount = 0; 3712 boolean_t string_is_key = B_FALSE; 3713 boolean_t collect_object = B_FALSE; 3714 boolean_t found_key = B_FALSE; 3715 boolean_t in_array = B_FALSE; 3716 uint32_t braces = 0, brackets = 0; 3717 char *elem = elemlist; 3718 char *dd = dest; 3719 uintptr_t cur; 3720 3721 for (cur = json; cur < json + size; cur++) { 3722 char cc = dtrace_load8(cur); 3723 if (cc == '\0') 3724 return (NULL); 3725 3726 switch (state) { 3727 case DTRACE_JSON_REST: 3728 if (isspace(cc)) 3729 break; 3730 3731 if (cc == '{') { 3732 state = DTRACE_JSON_OBJECT; 3733 break; 3734 } 3735 3736 if (cc == '[') { 3737 in_array = B_TRUE; 3738 array_pos = 0; 3739 array_elem = dtrace_strtoll(elem, 10, size); 3740 found_key = array_elem == 0 ? B_TRUE : B_FALSE; 3741 state = DTRACE_JSON_VALUE; 3742 break; 3743 } 3744 3745 /* 3746 * ERROR: expected to find a top-level object or array. 3747 */ 3748 return (NULL); 3749 case DTRACE_JSON_OBJECT: 3750 if (isspace(cc)) 3751 break; 3752 3753 if (cc == '"') { 3754 state = DTRACE_JSON_STRING; 3755 string_is_key = B_TRUE; 3756 break; 3757 } 3758 3759 /* 3760 * ERROR: either the object did not start with a key 3761 * string, or we've run off the end of the object 3762 * without finding the requested key. 3763 */ 3764 return (NULL); 3765 case DTRACE_JSON_STRING: 3766 if (cc == '\\') { 3767 *dd++ = '\\'; 3768 state = DTRACE_JSON_STRING_ESCAPE; 3769 break; 3770 } 3771 3772 if (cc == '"') { 3773 if (collect_object) { 3774 /* 3775 * We don't reset the dest here, as 3776 * the string is part of a larger 3777 * object being collected. 3778 */ 3779 *dd++ = cc; 3780 collect_object = B_FALSE; 3781 state = DTRACE_JSON_COLLECT_OBJECT; 3782 break; 3783 } 3784 *dd = '\0'; 3785 dd = dest; /* reset string buffer */ 3786 if (string_is_key) { 3787 if (dtrace_strncmp(dest, elem, 3788 size) == 0) 3789 found_key = B_TRUE; 3790 } else if (found_key) { 3791 if (nelems > 1) { 3792 /* 3793 * We expected an object, not 3794 * this string. 3795 */ 3796 return (NULL); 3797 } 3798 return (dest); 3799 } 3800 state = string_is_key ? DTRACE_JSON_COLON : 3801 DTRACE_JSON_COMMA; 3802 string_is_key = B_FALSE; 3803 break; 3804 } 3805 3806 *dd++ = cc; 3807 break; 3808 case DTRACE_JSON_STRING_ESCAPE: 3809 *dd++ = cc; 3810 if (cc == 'u') { 3811 escape_unicount = 0; 3812 state = DTRACE_JSON_STRING_ESCAPE_UNICODE; 3813 } else { 3814 state = DTRACE_JSON_STRING; 3815 } 3816 break; 3817 case DTRACE_JSON_STRING_ESCAPE_UNICODE: 3818 if (!isxdigit(cc)) { 3819 /* 3820 * ERROR: invalid unicode escape, expected 3821 * four valid hexidecimal digits. 3822 */ 3823 return (NULL); 3824 } 3825 3826 *dd++ = cc; 3827 if (++escape_unicount == 4) 3828 state = DTRACE_JSON_STRING; 3829 break; 3830 case DTRACE_JSON_COLON: 3831 if (isspace(cc)) 3832 break; 3833 3834 if (cc == ':') { 3835 state = DTRACE_JSON_VALUE; 3836 break; 3837 } 3838 3839 /* 3840 * ERROR: expected a colon. 
3841 */ 3842 return (NULL); 3843 case DTRACE_JSON_COMMA: 3844 if (isspace(cc)) 3845 break; 3846 3847 if (cc == ',') { 3848 if (in_array) { 3849 state = DTRACE_JSON_VALUE; 3850 if (++array_pos == array_elem) 3851 found_key = B_TRUE; 3852 } else { 3853 state = DTRACE_JSON_OBJECT; 3854 } 3855 break; 3856 } 3857 3858 /* 3859 * ERROR: either we hit an unexpected character, or 3860 * we reached the end of the object or array without 3861 * finding the requested key. 3862 */ 3863 return (NULL); 3864 case DTRACE_JSON_IDENTIFIER: 3865 if (islower(cc)) { 3866 *dd++ = cc; 3867 break; 3868 } 3869 3870 *dd = '\0'; 3871 dd = dest; /* reset string buffer */ 3872 3873 if (dtrace_strncmp(dest, "true", 5) == 0 || 3874 dtrace_strncmp(dest, "false", 6) == 0 || 3875 dtrace_strncmp(dest, "null", 5) == 0) { 3876 if (found_key) { 3877 if (nelems > 1) { 3878 /* 3879 * ERROR: We expected an object, 3880 * not this identifier. 3881 */ 3882 return (NULL); 3883 } 3884 return (dest); 3885 } else { 3886 cur--; 3887 state = DTRACE_JSON_COMMA; 3888 break; 3889 } 3890 } 3891 3892 /* 3893 * ERROR: we did not recognise the identifier as one 3894 * of those in the JSON specification. 3895 */ 3896 return (NULL); 3897 case DTRACE_JSON_NUMBER: 3898 if (cc == '.') { 3899 *dd++ = cc; 3900 state = DTRACE_JSON_NUMBER_FRAC; 3901 break; 3902 } 3903 3904 if (cc == 'x' || cc == 'X') { 3905 /* 3906 * ERROR: specification explicitly excludes 3907 * hexidecimal or octal numbers. 3908 */ 3909 return (NULL); 3910 } 3911 3912 /* FALLTHRU */ 3913 case DTRACE_JSON_NUMBER_FRAC: 3914 if (cc == 'e' || cc == 'E') { 3915 *dd++ = cc; 3916 state = DTRACE_JSON_NUMBER_EXP; 3917 break; 3918 } 3919 3920 if (cc == '+' || cc == '-') { 3921 /* 3922 * ERROR: expect sign as part of exponent only. 3923 */ 3924 return (NULL); 3925 } 3926 /* FALLTHRU */ 3927 case DTRACE_JSON_NUMBER_EXP: 3928 if (isdigit(cc) || cc == '+' || cc == '-') { 3929 *dd++ = cc; 3930 break; 3931 } 3932 3933 *dd = '\0'; 3934 dd = dest; /* reset string buffer */ 3935 if (found_key) { 3936 if (nelems > 1) { 3937 /* 3938 * ERROR: We expected an object, not 3939 * this number. 3940 */ 3941 return (NULL); 3942 } 3943 return (dest); 3944 } 3945 3946 cur--; 3947 state = DTRACE_JSON_COMMA; 3948 break; 3949 case DTRACE_JSON_VALUE: 3950 if (isspace(cc)) 3951 break; 3952 3953 if (cc == '{' || cc == '[') { 3954 if (nelems > 1 && found_key) { 3955 in_array = cc == '[' ? B_TRUE : B_FALSE; 3956 /* 3957 * If our element selector directs us 3958 * to descend into this nested object, 3959 * then move to the next selector 3960 * element in the list and restart the 3961 * state machine. 3962 */ 3963 while (*elem != '\0') 3964 elem++; 3965 elem++; /* skip the inter-element NUL */ 3966 nelems--; 3967 dd = dest; 3968 if (in_array) { 3969 state = DTRACE_JSON_VALUE; 3970 array_pos = 0; 3971 array_elem = dtrace_strtoll( 3972 elem, 10, size); 3973 found_key = array_elem == 0 ? 3974 B_TRUE : B_FALSE; 3975 } else { 3976 found_key = B_FALSE; 3977 state = DTRACE_JSON_OBJECT; 3978 } 3979 break; 3980 } 3981 3982 /* 3983 * Otherwise, we wish to either skip this 3984 * nested object or return it in full. 3985 */ 3986 if (cc == '[') 3987 brackets = 1; 3988 else 3989 braces = 1; 3990 *dd++ = cc; 3991 state = DTRACE_JSON_COLLECT_OBJECT; 3992 break; 3993 } 3994 3995 if (cc == '"') { 3996 state = DTRACE_JSON_STRING; 3997 break; 3998 } 3999 4000 if (islower(cc)) { 4001 /* 4002 * Here we deal with true, false and null. 
4003 */ 4004 *dd++ = cc; 4005 state = DTRACE_JSON_IDENTIFIER; 4006 break; 4007 } 4008 4009 if (cc == '-' || isdigit(cc)) { 4010 *dd++ = cc; 4011 state = DTRACE_JSON_NUMBER; 4012 break; 4013 } 4014 4015 /* 4016 * ERROR: unexpected character at start of value. 4017 */ 4018 return (NULL); 4019 case DTRACE_JSON_COLLECT_OBJECT: 4020 if (cc == '\0') 4021 /* 4022 * ERROR: unexpected end of input. 4023 */ 4024 return (NULL); 4025 4026 *dd++ = cc; 4027 if (cc == '"') { 4028 collect_object = B_TRUE; 4029 state = DTRACE_JSON_STRING; 4030 break; 4031 } 4032 4033 if (cc == ']') { 4034 if (brackets-- == 0) { 4035 /* 4036 * ERROR: unbalanced brackets. 4037 */ 4038 return (NULL); 4039 } 4040 } else if (cc == '}') { 4041 if (braces-- == 0) { 4042 /* 4043 * ERROR: unbalanced braces. 4044 */ 4045 return (NULL); 4046 } 4047 } else if (cc == '{') { 4048 braces++; 4049 } else if (cc == '[') { 4050 brackets++; 4051 } 4052 4053 if (brackets == 0 && braces == 0) { 4054 if (found_key) { 4055 *dd = '\0'; 4056 return (dest); 4057 } 4058 dd = dest; /* reset string buffer */ 4059 state = DTRACE_JSON_COMMA; 4060 } 4061 break; 4062 } 4063 } 4064 return (NULL); 4065 } 4066 4067 /* 4068 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 4069 * Notice that we don't bother validating the proper number of arguments or 4070 * their types in the tuple stack. This isn't needed because all argument 4071 * interpretation is safe because of our load safety -- the worst that can 4072 * happen is that a bogus program can obtain bogus results. 4073 */ 4074 static void 4075 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 4076 dtrace_key_t *tupregs, int nargs, 4077 dtrace_mstate_t *mstate, dtrace_state_t *state) 4078 { 4079 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4080 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4081 dtrace_vstate_t *vstate = &state->dts_vstate; 4082 4083 #ifdef illumos 4084 union { 4085 mutex_impl_t mi; 4086 uint64_t mx; 4087 } m; 4088 4089 union { 4090 krwlock_t ri; 4091 uintptr_t rw; 4092 } r; 4093 #else 4094 struct thread *lowner; 4095 union { 4096 struct lock_object *li; 4097 uintptr_t lx; 4098 } l; 4099 #endif 4100 4101 switch (subr) { 4102 case DIF_SUBR_RAND: 4103 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 4104 break; 4105 4106 #ifdef illumos 4107 case DIF_SUBR_MUTEX_OWNED: 4108 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4109 mstate, vstate)) { 4110 regs[rd] = 0; 4111 break; 4112 } 4113 4114 m.mx = dtrace_load64(tupregs[0].dttk_value); 4115 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 4116 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 4117 else 4118 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 4119 break; 4120 4121 case DIF_SUBR_MUTEX_OWNER: 4122 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4123 mstate, vstate)) { 4124 regs[rd] = 0; 4125 break; 4126 } 4127 4128 m.mx = dtrace_load64(tupregs[0].dttk_value); 4129 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 4130 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 4131 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 4132 else 4133 regs[rd] = 0; 4134 break; 4135 4136 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4137 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4138 mstate, vstate)) { 4139 regs[rd] = 0; 4140 break; 4141 } 4142 4143 m.mx = dtrace_load64(tupregs[0].dttk_value); 4144 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 4145 break; 4146 4147 case DIF_SUBR_MUTEX_TYPE_SPIN: 4148 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 4149 mstate, vstate)) { 4150 
regs[rd] = 0; 4151 break; 4152 } 4153 4154 m.mx = dtrace_load64(tupregs[0].dttk_value); 4155 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 4156 break; 4157 4158 case DIF_SUBR_RW_READ_HELD: { 4159 uintptr_t tmp; 4160 4161 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4162 mstate, vstate)) { 4163 regs[rd] = 0; 4164 break; 4165 } 4166 4167 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4168 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 4169 break; 4170 } 4171 4172 case DIF_SUBR_RW_WRITE_HELD: 4173 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4174 mstate, vstate)) { 4175 regs[rd] = 0; 4176 break; 4177 } 4178 4179 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4180 regs[rd] = _RW_WRITE_HELD(&r.ri); 4181 break; 4182 4183 case DIF_SUBR_RW_ISWRITER: 4184 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 4185 mstate, vstate)) { 4186 regs[rd] = 0; 4187 break; 4188 } 4189 4190 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 4191 regs[rd] = _RW_ISWRITER(&r.ri); 4192 break; 4193 4194 #else /* !illumos */ 4195 case DIF_SUBR_MUTEX_OWNED: 4196 if (!dtrace_canload(tupregs[0].dttk_value, 4197 sizeof (struct lock_object), mstate, vstate)) { 4198 regs[rd] = 0; 4199 break; 4200 } 4201 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4202 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4203 break; 4204 4205 case DIF_SUBR_MUTEX_OWNER: 4206 if (!dtrace_canload(tupregs[0].dttk_value, 4207 sizeof (struct lock_object), mstate, vstate)) { 4208 regs[rd] = 0; 4209 break; 4210 } 4211 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4212 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4213 regs[rd] = (uintptr_t)lowner; 4214 break; 4215 4216 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 4217 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4218 mstate, vstate)) { 4219 regs[rd] = 0; 4220 break; 4221 } 4222 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4223 /* XXX - should be only LC_SLEEPABLE? 
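	 * (In FreeBSD terms, lc_flags classifies the lock class roughly
	 * as follows: LC_SPINLOCK marks spin locks, LC_SLEEPLOCK marks
	 * locks acquired in sleepable context, and LC_SLEEPABLE marks
	 * classes that may be held across a sleep, e.g. sx locks.
	 * Accepting either of the latter two here is the permissive
	 * reading of "adaptive".)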
*/ 4224 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 4225 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 4226 break; 4227 4228 case DIF_SUBR_MUTEX_TYPE_SPIN: 4229 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 4230 mstate, vstate)) { 4231 regs[rd] = 0; 4232 break; 4233 } 4234 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4235 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 4236 break; 4237 4238 case DIF_SUBR_RW_READ_HELD: 4239 case DIF_SUBR_SX_SHARED_HELD: 4240 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4241 mstate, vstate)) { 4242 regs[rd] = 0; 4243 break; 4244 } 4245 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 4246 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4247 lowner == NULL; 4248 break; 4249 4250 case DIF_SUBR_RW_WRITE_HELD: 4251 case DIF_SUBR_SX_EXCLUSIVE_HELD: 4252 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4253 mstate, vstate)) { 4254 regs[rd] = 0; 4255 break; 4256 } 4257 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4258 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 4259 regs[rd] = (lowner == curthread); 4260 break; 4261 4262 case DIF_SUBR_RW_ISWRITER: 4263 case DIF_SUBR_SX_ISEXCLUSIVE: 4264 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 4265 mstate, vstate)) { 4266 regs[rd] = 0; 4267 break; 4268 } 4269 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 4270 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 4271 lowner != NULL; 4272 break; 4273 #endif /* illumos */ 4274 4275 case DIF_SUBR_BCOPY: { 4276 /* 4277 * We need to be sure that the destination is in the scratch 4278 * region -- no other region is allowed. 4279 */ 4280 uintptr_t src = tupregs[0].dttk_value; 4281 uintptr_t dest = tupregs[1].dttk_value; 4282 size_t size = tupregs[2].dttk_value; 4283 4284 if (!dtrace_inscratch(dest, size, mstate)) { 4285 *flags |= CPU_DTRACE_BADADDR; 4286 *illval = regs[rd]; 4287 break; 4288 } 4289 4290 if (!dtrace_canload(src, size, mstate, vstate)) { 4291 regs[rd] = 0; 4292 break; 4293 } 4294 4295 dtrace_bcopy((void *)src, (void *)dest, size); 4296 break; 4297 } 4298 4299 case DIF_SUBR_ALLOCA: 4300 case DIF_SUBR_COPYIN: { 4301 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4302 uint64_t size = 4303 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 4304 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 4305 4306 /* 4307 * This action doesn't require any credential checks since 4308 * probes will not activate in user contexts to which the 4309 * enabling user does not have permissions. 4310 */ 4311 4312 /* 4313 * Rounding up the user allocation size could have overflowed 4314 * a large, bogus allocation (like -1ULL) to 0. 4315 */ 4316 if (scratch_size < size || 4317 !DTRACE_INSCRATCH(mstate, scratch_size)) { 4318 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4319 regs[rd] = 0; 4320 break; 4321 } 4322 4323 if (subr == DIF_SUBR_COPYIN) { 4324 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4325 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4326 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4327 } 4328 4329 mstate->dtms_scratch_ptr += scratch_size; 4330 regs[rd] = dest; 4331 break; 4332 } 4333 4334 case DIF_SUBR_COPYINTO: { 4335 uint64_t size = tupregs[1].dttk_value; 4336 uintptr_t dest = tupregs[2].dttk_value; 4337 4338 /* 4339 * This action doesn't require any credential checks since 4340 * probes will not activate in user contexts to which the 4341 * enabling user does not have permissions. 
4342 */ 4343 if (!dtrace_inscratch(dest, size, mstate)) { 4344 *flags |= CPU_DTRACE_BADADDR; 4345 *illval = regs[rd]; 4346 break; 4347 } 4348 4349 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4350 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 4351 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4352 break; 4353 } 4354 4355 case DIF_SUBR_COPYINSTR: { 4356 uintptr_t dest = mstate->dtms_scratch_ptr; 4357 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4358 4359 if (nargs > 1 && tupregs[1].dttk_value < size) 4360 size = tupregs[1].dttk_value + 1; 4361 4362 /* 4363 * This action doesn't require any credential checks since 4364 * probes will not activate in user contexts to which the 4365 * enabling user does not have permissions. 4366 */ 4367 if (!DTRACE_INSCRATCH(mstate, size)) { 4368 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4369 regs[rd] = 0; 4370 break; 4371 } 4372 4373 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4374 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 4375 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4376 4377 ((char *)dest)[size - 1] = '\0'; 4378 mstate->dtms_scratch_ptr += size; 4379 regs[rd] = dest; 4380 break; 4381 } 4382 4383 #ifdef illumos 4384 case DIF_SUBR_MSGSIZE: 4385 case DIF_SUBR_MSGDSIZE: { 4386 uintptr_t baddr = tupregs[0].dttk_value, daddr; 4387 uintptr_t wptr, rptr; 4388 size_t count = 0; 4389 int cont = 0; 4390 4391 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 4392 4393 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 4394 vstate)) { 4395 regs[rd] = 0; 4396 break; 4397 } 4398 4399 wptr = dtrace_loadptr(baddr + 4400 offsetof(mblk_t, b_wptr)); 4401 4402 rptr = dtrace_loadptr(baddr + 4403 offsetof(mblk_t, b_rptr)); 4404 4405 if (wptr < rptr) { 4406 *flags |= CPU_DTRACE_BADADDR; 4407 *illval = tupregs[0].dttk_value; 4408 break; 4409 } 4410 4411 daddr = dtrace_loadptr(baddr + 4412 offsetof(mblk_t, b_datap)); 4413 4414 baddr = dtrace_loadptr(baddr + 4415 offsetof(mblk_t, b_cont)); 4416 4417 /* 4418 * We want to protect against denial-of-service here, 4419 * so we're only going to search the list for 4420 * dtrace_msgdsize_max mblks.
4421 */ 4422 if (cont++ > dtrace_msgdsize_max) { 4423 *flags |= CPU_DTRACE_ILLOP; 4424 break; 4425 } 4426 4427 if (subr == DIF_SUBR_MSGDSIZE) { 4428 if (dtrace_load8(daddr + 4429 offsetof(dblk_t, db_type)) != M_DATA) 4430 continue; 4431 } 4432 4433 count += wptr - rptr; 4434 } 4435 4436 if (!(*flags & CPU_DTRACE_FAULT)) 4437 regs[rd] = count; 4438 4439 break; 4440 } 4441 #endif 4442 4443 case DIF_SUBR_PROGENYOF: { 4444 pid_t pid = tupregs[0].dttk_value; 4445 proc_t *p; 4446 int rval = 0; 4447 4448 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4449 4450 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 4451 #ifdef illumos 4452 if (p->p_pidp->pid_id == pid) { 4453 #else 4454 if (p->p_pid == pid) { 4455 #endif 4456 rval = 1; 4457 break; 4458 } 4459 } 4460 4461 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4462 4463 regs[rd] = rval; 4464 break; 4465 } 4466 4467 case DIF_SUBR_SPECULATION: 4468 regs[rd] = dtrace_speculation(state); 4469 break; 4470 4471 case DIF_SUBR_COPYOUT: { 4472 uintptr_t kaddr = tupregs[0].dttk_value; 4473 uintptr_t uaddr = tupregs[1].dttk_value; 4474 uint64_t size = tupregs[2].dttk_value; 4475 4476 if (!dtrace_destructive_disallow && 4477 dtrace_priv_proc_control(state) && 4478 !dtrace_istoxic(kaddr, size) && 4479 dtrace_canload(kaddr, size, mstate, vstate)) { 4480 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4481 dtrace_copyout(kaddr, uaddr, size, flags); 4482 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4483 } 4484 break; 4485 } 4486 4487 case DIF_SUBR_COPYOUTSTR: { 4488 uintptr_t kaddr = tupregs[0].dttk_value; 4489 uintptr_t uaddr = tupregs[1].dttk_value; 4490 uint64_t size = tupregs[2].dttk_value; 4491 4492 if (!dtrace_destructive_disallow && 4493 dtrace_priv_proc_control(state) && 4494 !dtrace_istoxic(kaddr, size) && 4495 dtrace_strcanload(kaddr, size, mstate, vstate)) { 4496 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4497 dtrace_copyoutstr(kaddr, uaddr, size, flags); 4498 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4499 } 4500 break; 4501 } 4502 4503 case DIF_SUBR_STRLEN: { 4504 size_t sz; 4505 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 4506 sz = dtrace_strlen((char *)addr, 4507 state->dts_options[DTRACEOPT_STRSIZE]); 4508 4509 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 4510 regs[rd] = 0; 4511 break; 4512 } 4513 4514 regs[rd] = sz; 4515 4516 break; 4517 } 4518 4519 case DIF_SUBR_STRCHR: 4520 case DIF_SUBR_STRRCHR: { 4521 /* 4522 * We're going to iterate over the string looking for the 4523 * specified character. We will iterate until we have reached 4524 * the string length or we have found the character. If this 4525 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 4526 * of the specified character instead of the first. 4527 */ 4528 uintptr_t saddr = tupregs[0].dttk_value; 4529 uintptr_t addr = tupregs[0].dttk_value; 4530 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 4531 char c, target = (char)tupregs[1].dttk_value; 4532 4533 for (regs[rd] = 0; addr < limit; addr++) { 4534 if ((c = dtrace_load8(addr)) == target) { 4535 regs[rd] = addr; 4536 4537 if (subr == DIF_SUBR_STRCHR) 4538 break; 4539 } 4540 4541 if (c == '\0') 4542 break; 4543 } 4544 4545 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 4546 regs[rd] = 0; 4547 break; 4548 } 4549 4550 break; 4551 } 4552 4553 case DIF_SUBR_STRSTR: 4554 case DIF_SUBR_INDEX: 4555 case DIF_SUBR_RINDEX: { 4556 /* 4557 * We're going to iterate over the string looking for the 4558 * specified string. 
We will iterate until we have reached 4559 * the string length or we have found the string. (Yes, this 4560 * is done in the most naive way possible -- but considering 4561 * that the string we're searching for is likely to be 4562 * relatively short, the complexity of Rabin-Karp or similar 4563 * hardly seems merited.) 4564 */ 4565 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 4566 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 4567 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4568 size_t len = dtrace_strlen(addr, size); 4569 size_t sublen = dtrace_strlen(substr, size); 4570 char *limit = addr + len, *orig = addr; 4571 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 4572 int inc = 1; 4573 4574 regs[rd] = notfound; 4575 4576 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 4577 regs[rd] = 0; 4578 break; 4579 } 4580 4581 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 4582 vstate)) { 4583 regs[rd] = 0; 4584 break; 4585 } 4586 4587 /* 4588 * strstr() and index()/rindex() have similar semantics if 4589 * both strings are the empty string: strstr() returns a 4590 * pointer to the (empty) string, and index() and rindex() 4591 * both return index 0 (regardless of any position argument). 4592 */ 4593 if (sublen == 0 && len == 0) { 4594 if (subr == DIF_SUBR_STRSTR) 4595 regs[rd] = (uintptr_t)addr; 4596 else 4597 regs[rd] = 0; 4598 break; 4599 } 4600 4601 if (subr != DIF_SUBR_STRSTR) { 4602 if (subr == DIF_SUBR_RINDEX) { 4603 limit = orig - 1; 4604 addr += len; 4605 inc = -1; 4606 } 4607 4608 /* 4609 * Both index() and rindex() take an optional position 4610 * argument that denotes the starting position. 4611 */ 4612 if (nargs == 3) { 4613 int64_t pos = (int64_t)tupregs[2].dttk_value; 4614 4615 /* 4616 * If the position argument to index() is 4617 * negative, Perl implicitly clamps it at 4618 * zero. This semantic is a little surprising 4619 * given the special meaning of negative 4620 * positions to similar Perl functions like 4621 * substr(), but it appears to reflect a 4622 * notion that index() can start from a 4623 * negative index and increment its way up to 4624 * the string. Given this notion, Perl's 4625 * rindex() is at least self-consistent in 4626 * that it implicitly clamps positions greater 4627 * than the string length to be the string 4628 * length. Where Perl completely loses 4629 * coherence, however, is when the specified 4630 * substring is the empty string (""). In 4631 * this case, even if the position is 4632 * negative, rindex() returns 0 -- and even if 4633 * the position is greater than the length, 4634 * index() returns the string length. These 4635 * semantics violate the notion that index() 4636 * should never return a value less than the 4637 * specified position and that rindex() should 4638 * never return a value greater than the 4639 * specified position. (One assumes that 4640 * these semantics are artifacts of Perl's 4641 * implementation and not the results of 4642 * deliberate design -- it beggars belief that 4643 * even Larry Wall could desire such oddness.) 4644 * While in the abstract one would wish for 4645 * consistent position semantics across 4646 * substr(), index() and rindex() -- or at the 4647 * very least self-consistent position 4648 * semantics for index() and rindex() -- we 4649 * instead opt to keep with the extant Perl 4650 * semantics, in all their broken glory. (Do 4651 * we have more desire to maintain Perl's 4652 * semantics than Perl does? Probably.) 
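 *
 * Concretely, these semantics give (illustrative values):
 *
 *   index("foobarbaz", "bar")     --> 3
 *   index("foobarbar", "bar", 4)  --> 6
 *   index("foo", "", 10)          --> 3 (clamped to the length)
 *   rindex("foobarbar", "bar")    --> 6
 *   rindex("foo", "", -2)         --> 0 (negative pos, empty "")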
4653 */ 4654 if (subr == DIF_SUBR_RINDEX) { 4655 if (pos < 0) { 4656 if (sublen == 0) 4657 regs[rd] = 0; 4658 break; 4659 } 4660 4661 if (pos > len) 4662 pos = len; 4663 } else { 4664 if (pos < 0) 4665 pos = 0; 4666 4667 if (pos >= len) { 4668 if (sublen == 0) 4669 regs[rd] = len; 4670 break; 4671 } 4672 } 4673 4674 addr = orig + pos; 4675 } 4676 } 4677 4678 for (regs[rd] = notfound; addr != limit; addr += inc) { 4679 if (dtrace_strncmp(addr, substr, sublen) == 0) { 4680 if (subr != DIF_SUBR_STRSTR) { 4681 /* 4682 * As D index() and rindex() are 4683 * modeled on Perl (and not on awk), 4684 * we return a zero-based (and not a 4685 * one-based) index. (For you Perl 4686 * weenies: no, we're not going to add 4687 * $[ -- and shouldn't you be at a con 4688 * or something?) 4689 */ 4690 regs[rd] = (uintptr_t)(addr - orig); 4691 break; 4692 } 4693 4694 ASSERT(subr == DIF_SUBR_STRSTR); 4695 regs[rd] = (uintptr_t)addr; 4696 break; 4697 } 4698 } 4699 4700 break; 4701 } 4702 4703 case DIF_SUBR_STRTOK: { 4704 uintptr_t addr = tupregs[0].dttk_value; 4705 uintptr_t tokaddr = tupregs[1].dttk_value; 4706 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4707 uintptr_t limit, toklimit = tokaddr + size; 4708 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 4709 char *dest = (char *)mstate->dtms_scratch_ptr; 4710 int i; 4711 4712 /* 4713 * Check both the token buffer and (later) the input buffer, 4714 * since both could be non-scratch addresses. 4715 */ 4716 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 4717 regs[rd] = 0; 4718 break; 4719 } 4720 4721 if (!DTRACE_INSCRATCH(mstate, size)) { 4722 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4723 regs[rd] = 0; 4724 break; 4725 } 4726 4727 if (addr == 0) { 4728 /* 4729 * If the address specified is NULL, we use our saved 4730 * strtok pointer from the mstate. Note that this 4731 * means that the saved strtok pointer is _only_ 4732 * valid within multiple enablings of the same probe -- 4733 * it behaves like an implicit clause-local variable. 4734 */ 4735 addr = mstate->dtms_strtok; 4736 } else { 4737 /* 4738 * If the user-specified address is non-NULL we must 4739 * access check it. This is the only time we have 4740 * a chance to do so, since this address may reside 4741 * in the string table of this clause-- future calls 4742 * (when we fetch addr from mstate->dtms_strtok) 4743 * would fail this access check. 4744 */ 4745 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 4746 regs[rd] = 0; 4747 break; 4748 } 4749 } 4750 4751 /* 4752 * First, zero the token map, and then process the token 4753 * string -- setting a bit in the map for every character 4754 * found in the token string. 4755 */ 4756 for (i = 0; i < sizeof (tokmap); i++) 4757 tokmap[i] = 0; 4758 4759 for (; tokaddr < toklimit; tokaddr++) { 4760 if ((c = dtrace_load8(tokaddr)) == '\0') 4761 break; 4762 4763 ASSERT((c >> 3) < sizeof (tokmap)); 4764 tokmap[c >> 3] |= (1 << (c & 0x7)); 4765 } 4766 4767 for (limit = addr + size; addr < limit; addr++) { 4768 /* 4769 * We're looking for a character that is _not_ contained 4770 * in the token string. 4771 */ 4772 if ((c = dtrace_load8(addr)) == '\0') 4773 break; 4774 4775 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 4776 break; 4777 } 4778 4779 if (c == '\0') { 4780 /* 4781 * We reached the end of the string without finding 4782 * any character that was not in the token string. 4783 * We return NULL in this case, and we set the saved 4784 * address to NULL as well. 
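 *
 * The net effect is the usual strtok() iteration pattern within a
 * clause, e.g.:
 *
 *   strtok("/a//b/", "/") --> "a"
 *   strtok(NULL, "/")     --> "b"
 *   strtok(NULL, "/")     --> NULL (and dtms_strtok is cleared)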
4785 */ 4786 regs[rd] = 0; 4787 mstate->dtms_strtok = 0; 4788 break; 4789 } 4790 4791 /* 4792 * From here on, we're copying into the destination string. 4793 */ 4794 for (i = 0; addr < limit && i < size - 1; addr++) { 4795 if ((c = dtrace_load8(addr)) == '\0') 4796 break; 4797 4798 if (tokmap[c >> 3] & (1 << (c & 0x7))) 4799 break; 4800 4801 ASSERT(i < size); 4802 dest[i++] = c; 4803 } 4804 4805 ASSERT(i < size); 4806 dest[i] = '\0'; 4807 regs[rd] = (uintptr_t)dest; 4808 mstate->dtms_scratch_ptr += size; 4809 mstate->dtms_strtok = addr; 4810 break; 4811 } 4812 4813 case DIF_SUBR_SUBSTR: { 4814 uintptr_t s = tupregs[0].dttk_value; 4815 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4816 char *d = (char *)mstate->dtms_scratch_ptr; 4817 int64_t index = (int64_t)tupregs[1].dttk_value; 4818 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4819 size_t len = dtrace_strlen((char *)s, size); 4820 int64_t i; 4821 4822 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4823 regs[rd] = 0; 4824 break; 4825 } 4826 4827 if (!DTRACE_INSCRATCH(mstate, size)) { 4828 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4829 regs[rd] = 0; 4830 break; 4831 } 4832 4833 if (nargs <= 2) 4834 remaining = (int64_t)size; 4835 4836 if (index < 0) { 4837 index += len; 4838 4839 if (index < 0 && index + remaining > 0) { 4840 remaining += index; 4841 index = 0; 4842 } 4843 } 4844 4845 if (index >= len || index < 0) { 4846 remaining = 0; 4847 } else if (remaining < 0) { 4848 remaining += len - index; 4849 } else if (index + remaining > size) { 4850 remaining = size - index; 4851 } 4852 4853 for (i = 0; i < remaining; i++) { 4854 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4855 break; 4856 } 4857 4858 d[i] = '\0'; 4859 4860 mstate->dtms_scratch_ptr += size; 4861 regs[rd] = (uintptr_t)d; 4862 break; 4863 } 4864 4865 case DIF_SUBR_JSON: { 4866 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4867 uintptr_t json = tupregs[0].dttk_value; 4868 size_t jsonlen = dtrace_strlen((char *)json, size); 4869 uintptr_t elem = tupregs[1].dttk_value; 4870 size_t elemlen = dtrace_strlen((char *)elem, size); 4871 4872 char *dest = (char *)mstate->dtms_scratch_ptr; 4873 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; 4874 char *ee = elemlist; 4875 int nelems = 1; 4876 uintptr_t cur; 4877 4878 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || 4879 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { 4880 regs[rd] = 0; 4881 break; 4882 } 4883 4884 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { 4885 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4886 regs[rd] = 0; 4887 break; 4888 } 4889 4890 /* 4891 * Read the element selector and split it up into a packed list 4892 * of strings. 4893 */ 4894 for (cur = elem; cur < elem + elemlen; cur++) { 4895 char cc = dtrace_load8(cur); 4896 4897 if (cur == elem && cc == '[') { 4898 /* 4899 * If the first element selector key is 4900 * actually an array index then ignore the 4901 * bracket. 4902 */ 4903 continue; 4904 } 4905 4906 if (cc == ']') 4907 continue; 4908 4909 if (cc == '.' 
|| cc == '[') { 4910 nelems++; 4911 cc = '\0'; 4912 } 4913 4914 *ee++ = cc; 4915 } 4916 *ee++ = '\0'; 4917 4918 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, 4919 nelems, dest)) != 0) 4920 mstate->dtms_scratch_ptr += jsonlen + 1; 4921 break; 4922 } 4923 4924 case DIF_SUBR_TOUPPER: 4925 case DIF_SUBR_TOLOWER: { 4926 uintptr_t s = tupregs[0].dttk_value; 4927 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4928 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4929 size_t len = dtrace_strlen((char *)s, size); 4930 char lower, upper, convert; 4931 int64_t i; 4932 4933 if (subr == DIF_SUBR_TOUPPER) { 4934 lower = 'a'; 4935 upper = 'z'; 4936 convert = 'A'; 4937 } else { 4938 lower = 'A'; 4939 upper = 'Z'; 4940 convert = 'a'; 4941 } 4942 4943 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4944 regs[rd] = 0; 4945 break; 4946 } 4947 4948 if (!DTRACE_INSCRATCH(mstate, size)) { 4949 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4950 regs[rd] = 0; 4951 break; 4952 } 4953 4954 for (i = 0; i < size - 1; i++) { 4955 if ((c = dtrace_load8(s + i)) == '\0') 4956 break; 4957 4958 if (c >= lower && c <= upper) 4959 c = convert + (c - lower); 4960 4961 dest[i] = c; 4962 } 4963 4964 ASSERT(i < size); 4965 dest[i] = '\0'; 4966 regs[rd] = (uintptr_t)dest; 4967 mstate->dtms_scratch_ptr += size; 4968 break; 4969 } 4970 4971 #ifdef illumos 4972 case DIF_SUBR_GETMAJOR: 4973 #ifdef _LP64 4974 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4975 #else 4976 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4977 #endif 4978 break; 4979 4980 case DIF_SUBR_GETMINOR: 4981 #ifdef _LP64 4982 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4983 #else 4984 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4985 #endif 4986 break; 4987 4988 case DIF_SUBR_DDI_PATHNAME: { 4989 /* 4990 * This one is a galactic mess. We are going to roughly 4991 * emulate ddi_pathname(), but it's made more complicated 4992 * by the fact that we (a) want to include the minor name and 4993 * (b) must proceed iteratively instead of recursively. 4994 */ 4995 uintptr_t dest = mstate->dtms_scratch_ptr; 4996 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4997 char *start = (char *)dest, *end = start + size - 1; 4998 uintptr_t daddr = tupregs[0].dttk_value; 4999 int64_t minor = (int64_t)tupregs[1].dttk_value; 5000 char *s; 5001 int i, len, depth = 0; 5002 5003 /* 5004 * Due to all the pointer jumping we do and context we must 5005 * rely upon, we just mandate that the user must have kernel 5006 * read privileges to use this routine. 5007 */ 5008 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 5009 *flags |= CPU_DTRACE_KPRIV; 5010 *illval = daddr; 5011 regs[rd] = 0; 5012 } 5013 5014 if (!DTRACE_INSCRATCH(mstate, size)) { 5015 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5016 regs[rd] = 0; 5017 break; 5018 } 5019 5020 *end = '\0'; 5021 5022 /* 5023 * We want to have a name for the minor. In order to do this, 5024 * we need to walk the minor list from the devinfo. We want 5025 * to be sure that we don't infinitely walk a circular list, 5026 * so we check for circularity by sending a scout pointer 5027 * ahead two elements for every element that we iterate over; 5028 * if the list is circular, these will ultimately point to the 5029 * same element. 
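 * (Schematically: the scout gains one element on maddr with each
 * iteration, so if both are trapped in a cycle of length N the
 * scout must land on maddr within N iterations, at which point we
 * abandon the walk with CPU_DTRACE_ILLOP instead of spinning.)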
You may recognize this little trick as the 5030 * answer to a stupid interview question -- one that always 5031 * seems to be asked by those who had to have it laboriously 5032 * explained to them, and who can't even concisely describe 5033 * the conditions under which one would be forced to resort to 5034 * this technique. Needless to say, those conditions are 5035 * found here -- and probably only here. Is this the only use 5036 * of this infamous trick in shipping, production code? If it 5037 * isn't, it probably should be... 5038 */ 5039 if (minor != -1) { 5040 uintptr_t maddr = dtrace_loadptr(daddr + 5041 offsetof(struct dev_info, devi_minor)); 5042 5043 uintptr_t next = offsetof(struct ddi_minor_data, next); 5044 uintptr_t name = offsetof(struct ddi_minor_data, 5045 d_minor) + offsetof(struct ddi_minor, name); 5046 uintptr_t dev = offsetof(struct ddi_minor_data, 5047 d_minor) + offsetof(struct ddi_minor, dev); 5048 uintptr_t scout; 5049 5050 if (maddr != NULL) 5051 scout = dtrace_loadptr(maddr + next); 5052 5053 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5054 uint64_t m; 5055 #ifdef _LP64 5056 m = dtrace_load64(maddr + dev) & MAXMIN64; 5057 #else 5058 m = dtrace_load32(maddr + dev) & MAXMIN; 5059 #endif 5060 if (m != minor) { 5061 maddr = dtrace_loadptr(maddr + next); 5062 5063 if (scout == NULL) 5064 continue; 5065 5066 scout = dtrace_loadptr(scout + next); 5067 5068 if (scout == NULL) 5069 continue; 5070 5071 scout = dtrace_loadptr(scout + next); 5072 5073 if (scout == NULL) 5074 continue; 5075 5076 if (scout == maddr) { 5077 *flags |= CPU_DTRACE_ILLOP; 5078 break; 5079 } 5080 5081 continue; 5082 } 5083 5084 /* 5085 * We have the minor data. Now we need to 5086 * copy the minor's name into the end of the 5087 * pathname. 5088 */ 5089 s = (char *)dtrace_loadptr(maddr + name); 5090 len = dtrace_strlen(s, size); 5091 5092 if (*flags & CPU_DTRACE_FAULT) 5093 break; 5094 5095 if (len != 0) { 5096 if ((end -= (len + 1)) < start) 5097 break; 5098 5099 *end = ':'; 5100 } 5101 5102 for (i = 1; i <= len; i++) 5103 end[i] = dtrace_load8((uintptr_t)s++); 5104 break; 5105 } 5106 } 5107 5108 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 5109 ddi_node_state_t devi_state; 5110 5111 devi_state = dtrace_load32(daddr + 5112 offsetof(struct dev_info, devi_node_state)); 5113 5114 if (*flags & CPU_DTRACE_FAULT) 5115 break; 5116 5117 if (devi_state >= DS_INITIALIZED) { 5118 s = (char *)dtrace_loadptr(daddr + 5119 offsetof(struct dev_info, devi_addr)); 5120 len = dtrace_strlen(s, size); 5121 5122 if (*flags & CPU_DTRACE_FAULT) 5123 break; 5124 5125 if (len != 0) { 5126 if ((end -= (len + 1)) < start) 5127 break; 5128 5129 *end = '@'; 5130 } 5131 5132 for (i = 1; i <= len; i++) 5133 end[i] = dtrace_load8((uintptr_t)s++); 5134 } 5135 5136 /* 5137 * Now for the node name... 5138 */ 5139 s = (char *)dtrace_loadptr(daddr + 5140 offsetof(struct dev_info, devi_node_name)); 5141 5142 daddr = dtrace_loadptr(daddr + 5143 offsetof(struct dev_info, devi_parent)); 5144 5145 /* 5146 * If our parent is NULL (that is, if we're the root 5147 * node), we're going to use the special path 5148 * "devices". 
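 * The pathname is assembled back-to-front in scratch, yielding
 * something like (an illustrative path, not a real device tree):
 *
 *   /devices/pci@0,0/pci1022,7460@7:devctl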
5149 */ 5150 if (daddr == 0) 5151 s = "devices"; 5152 5153 len = dtrace_strlen(s, size); 5154 if (*flags & CPU_DTRACE_FAULT) 5155 break; 5156 5157 if ((end -= (len + 1)) < start) 5158 break; 5159 5160 for (i = 1; i <= len; i++) 5161 end[i] = dtrace_load8((uintptr_t)s++); 5162 *end = '/'; 5163 5164 if (depth++ > dtrace_devdepth_max) { 5165 *flags |= CPU_DTRACE_ILLOP; 5166 break; 5167 } 5168 } 5169 5170 if (end < start) 5171 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5172 5173 if (daddr == 0) { 5174 regs[rd] = (uintptr_t)end; 5175 mstate->dtms_scratch_ptr += size; 5176 } 5177 5178 break; 5179 } 5180 #endif 5181 5182 case DIF_SUBR_STRJOIN: { 5183 char *d = (char *)mstate->dtms_scratch_ptr; 5184 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5185 uintptr_t s1 = tupregs[0].dttk_value; 5186 uintptr_t s2 = tupregs[1].dttk_value; 5187 int i = 0; 5188 5189 if (!dtrace_strcanload(s1, size, mstate, vstate) || 5190 !dtrace_strcanload(s2, size, mstate, vstate)) { 5191 regs[rd] = 0; 5192 break; 5193 } 5194 5195 if (!DTRACE_INSCRATCH(mstate, size)) { 5196 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5197 regs[rd] = 0; 5198 break; 5199 } 5200 5201 for (;;) { 5202 if (i >= size) { 5203 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5204 regs[rd] = 0; 5205 break; 5206 } 5207 5208 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 5209 i--; 5210 break; 5211 } 5212 } 5213 5214 for (;;) { 5215 if (i >= size) { 5216 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5217 regs[rd] = 0; 5218 break; 5219 } 5220 5221 if ((d[i++] = dtrace_load8(s2++)) == '\0') 5222 break; 5223 } 5224 5225 if (i < size) { 5226 mstate->dtms_scratch_ptr += i; 5227 regs[rd] = (uintptr_t)d; 5228 } 5229 5230 break; 5231 } 5232 5233 case DIF_SUBR_STRTOLL: { 5234 uintptr_t s = tupregs[0].dttk_value; 5235 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5236 int base = 10; 5237 5238 if (nargs > 1) { 5239 if ((base = tupregs[1].dttk_value) <= 1 || 5240 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5241 *flags |= CPU_DTRACE_ILLOP; 5242 break; 5243 } 5244 } 5245 5246 if (!dtrace_strcanload(s, size, mstate, vstate)) { 5247 regs[rd] = INT64_MIN; 5248 break; 5249 } 5250 5251 regs[rd] = dtrace_strtoll((char *)s, base, size); 5252 break; 5253 } 5254 5255 case DIF_SUBR_LLTOSTR: { 5256 int64_t i = (int64_t)tupregs[0].dttk_value; 5257 uint64_t val, digit; 5258 uint64_t size = 65; /* enough room for 2^64 in binary */ 5259 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 5260 int base = 10; 5261 5262 if (nargs > 1) { 5263 if ((base = tupregs[1].dttk_value) <= 1 || 5264 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 5265 *flags |= CPU_DTRACE_ILLOP; 5266 break; 5267 } 5268 } 5269 5270 val = (base == 10 && i < 0) ? 
i * -1 : i; 5271 5272 if (!DTRACE_INSCRATCH(mstate, size)) { 5273 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5274 regs[rd] = 0; 5275 break; 5276 } 5277 5278 for (*end-- = '\0'; val; val /= base) { 5279 if ((digit = val % base) <= '9' - '0') { 5280 *end-- = '0' + digit; 5281 } else { 5282 *end-- = 'a' + (digit - ('9' - '0') - 1); 5283 } 5284 } 5285 5286 if (i == 0 && base == 16) 5287 *end-- = '0'; 5288 5289 if (base == 16) 5290 *end-- = 'x'; 5291 5292 if (i == 0 || base == 8 || base == 16) 5293 *end-- = '0'; 5294 5295 if (i < 0 && base == 10) 5296 *end-- = '-'; 5297 5298 regs[rd] = (uintptr_t)end + 1; 5299 mstate->dtms_scratch_ptr += size; 5300 break; 5301 } 5302 5303 case DIF_SUBR_HTONS: 5304 case DIF_SUBR_NTOHS: 5305 #if BYTE_ORDER == BIG_ENDIAN 5306 regs[rd] = (uint16_t)tupregs[0].dttk_value; 5307 #else 5308 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 5309 #endif 5310 break; 5311 5312 5313 case DIF_SUBR_HTONL: 5314 case DIF_SUBR_NTOHL: 5315 #if BYTE_ORDER == BIG_ENDIAN 5316 regs[rd] = (uint32_t)tupregs[0].dttk_value; 5317 #else 5318 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 5319 #endif 5320 break; 5321 5322 5323 case DIF_SUBR_HTONLL: 5324 case DIF_SUBR_NTOHLL: 5325 #if BYTE_ORDER == BIG_ENDIAN 5326 regs[rd] = (uint64_t)tupregs[0].dttk_value; 5327 #else 5328 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 5329 #endif 5330 break; 5331 5332 5333 case DIF_SUBR_DIRNAME: 5334 case DIF_SUBR_BASENAME: { 5335 char *dest = (char *)mstate->dtms_scratch_ptr; 5336 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5337 uintptr_t src = tupregs[0].dttk_value; 5338 int i, j, len = dtrace_strlen((char *)src, size); 5339 int lastbase = -1, firstbase = -1, lastdir = -1; 5340 int start, end; 5341 5342 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 5343 regs[rd] = 0; 5344 break; 5345 } 5346 5347 if (!DTRACE_INSCRATCH(mstate, size)) { 5348 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5349 regs[rd] = 0; 5350 break; 5351 } 5352 5353 /* 5354 * The basename and dirname for a zero-length string is 5355 * defined to be "." 5356 */ 5357 if (len == 0) { 5358 len = 1; 5359 src = (uintptr_t)"."; 5360 } 5361 5362 /* 5363 * Start from the back of the string, moving back toward the 5364 * front until we see a character that isn't a slash. That 5365 * character is the last character in the basename. 5366 */ 5367 for (i = len - 1; i >= 0; i--) { 5368 if (dtrace_load8(src + i) != '/') 5369 break; 5370 } 5371 5372 if (i >= 0) 5373 lastbase = i; 5374 5375 /* 5376 * Starting from the last character in the basename, move 5377 * towards the front until we find a slash. The character 5378 * that we processed immediately before that is the first 5379 * character in the basename. 5380 */ 5381 for (; i >= 0; i--) { 5382 if (dtrace_load8(src + i) == '/') 5383 break; 5384 } 5385 5386 if (i >= 0) 5387 firstbase = i + 1; 5388 5389 /* 5390 * Now keep going until we find a non-slash character. That 5391 * character is the last character in the dirname. 5392 */ 5393 for (; i >= 0; i--) { 5394 if (dtrace_load8(src + i) != '/') 5395 break; 5396 } 5397 5398 if (i >= 0) 5399 lastdir = i; 5400 5401 ASSERT(!(lastbase == -1 && firstbase != -1)); 5402 ASSERT(!(firstbase == -1 && lastdir != -1)); 5403 5404 if (lastbase == -1) { 5405 /* 5406 * We didn't find a non-slash character. We know that 5407 * the length is non-zero, so the whole string must be 5408 * slashes. In either the dirname or the basename 5409 * case, we return '/'. 
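 * For example, both dirname("///") and basename("///") yield "/".
 * (By contrast, for "/usr/lib/" the loops above leave lastbase = 7,
 * firstbase = 5 and lastdir = 3 -- that is, basename "lib" and
 * dirname "/usr".)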
5410 */ 5411 ASSERT(firstbase == -1); 5412 firstbase = lastbase = lastdir = 0; 5413 } 5414 5415 if (firstbase == -1) { 5416 /* 5417 * The entire string consists only of a basename 5418 * component. If we're looking for dirname, we need 5419 * to change our string to be just "."; if we're 5420 * looking for a basename, we'll just set the first 5421 * character of the basename to be 0. 5422 */ 5423 if (subr == DIF_SUBR_DIRNAME) { 5424 ASSERT(lastdir == -1); 5425 src = (uintptr_t)"."; 5426 lastdir = 0; 5427 } else { 5428 firstbase = 0; 5429 } 5430 } 5431 5432 if (subr == DIF_SUBR_DIRNAME) { 5433 if (lastdir == -1) { 5434 /* 5435 * We know that we have a slash in the name -- 5436 * or lastdir would be set to 0, above. And 5437 * because lastdir is -1, we know that this 5438 * slash must be the first character. (That 5439 * is, the full string must be of the form 5440 * "/basename".) In this case, the last 5441 * character of the directory name is 0. 5442 */ 5443 lastdir = 0; 5444 } 5445 5446 start = 0; 5447 end = lastdir; 5448 } else { 5449 ASSERT(subr == DIF_SUBR_BASENAME); 5450 ASSERT(firstbase != -1 && lastbase != -1); 5451 start = firstbase; 5452 end = lastbase; 5453 } 5454 5455 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 5456 dest[j] = dtrace_load8(src + i); 5457 5458 dest[j] = '\0'; 5459 regs[rd] = (uintptr_t)dest; 5460 mstate->dtms_scratch_ptr += size; 5461 break; 5462 } 5463 5464 case DIF_SUBR_GETF: { 5465 uintptr_t fd = tupregs[0].dttk_value; 5466 struct filedesc *fdp; 5467 file_t *fp; 5468 5469 if (!dtrace_priv_proc(state)) { 5470 regs[rd] = 0; 5471 break; 5472 } 5473 fdp = curproc->p_fd; 5474 FILEDESC_SLOCK(fdp); 5475 fp = fget_locked(fdp, fd); 5476 mstate->dtms_getf = fp; 5477 regs[rd] = (uintptr_t)fp; 5478 FILEDESC_SUNLOCK(fdp); 5479 break; 5480 } 5481 5482 case DIF_SUBR_CLEANPATH: { 5483 char *dest = (char *)mstate->dtms_scratch_ptr, c; 5484 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 5485 uintptr_t src = tupregs[0].dttk_value; 5486 int i = 0, j = 0; 5487 #ifdef illumos 5488 zone_t *z; 5489 #endif 5490 5491 if (!dtrace_strcanload(src, size, mstate, vstate)) { 5492 regs[rd] = 0; 5493 break; 5494 } 5495 5496 if (!DTRACE_INSCRATCH(mstate, size)) { 5497 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5498 regs[rd] = 0; 5499 break; 5500 } 5501 5502 /* 5503 * Move forward, loading each character. 5504 */ 5505 do { 5506 c = dtrace_load8(src + i++); 5507 next: 5508 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 5509 break; 5510 5511 if (c != '/') { 5512 dest[j++] = c; 5513 continue; 5514 } 5515 5516 c = dtrace_load8(src + i++); 5517 5518 if (c == '/') { 5519 /* 5520 * We have two slashes -- we can just advance 5521 * to the next character. 5522 */ 5523 goto next; 5524 } 5525 5526 if (c != '.') { 5527 /* 5528 * This is not "." and it's not ".." -- we can 5529 * just store the "/" and this character and 5530 * drive on. 5531 */ 5532 dest[j++] = '/'; 5533 dest[j++] = c; 5534 continue; 5535 } 5536 5537 c = dtrace_load8(src + i++); 5538 5539 if (c == '/') { 5540 /* 5541 * This is a "/./" component. We're not going 5542 * to store anything in the destination buffer; 5543 * we're just going to go to the next component. 5544 */ 5545 goto next; 5546 } 5547 5548 if (c != '.') { 5549 /* 5550 * This is not ".." -- we can just store the 5551 * "/." and this character and continue 5552 * processing. 
5553 */ 5554 dest[j++] = '/'; 5555 dest[j++] = '.'; 5556 dest[j++] = c; 5557 continue; 5558 } 5559 5560 c = dtrace_load8(src + i++); 5561 5562 if (c != '/' && c != '\0') { 5563 /* 5564 * This is not ".." -- it's "..[mumble]". 5565 * We'll store the "/.." and this character 5566 * and continue processing. 5567 */ 5568 dest[j++] = '/'; 5569 dest[j++] = '.'; 5570 dest[j++] = '.'; 5571 dest[j++] = c; 5572 continue; 5573 } 5574 5575 /* 5576 * This is "/../" or "/..\0". We need to back up 5577 * our destination pointer until we find a "/". 5578 */ 5579 i--; 5580 while (j != 0 && dest[--j] != '/') 5581 continue; 5582 5583 if (c == '\0') 5584 dest[++j] = '/'; 5585 } while (c != '\0'); 5586 5587 dest[j] = '\0'; 5588 5589 #ifdef illumos 5590 if (mstate->dtms_getf != NULL && 5591 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && 5592 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { 5593 /* 5594 * If we've done a getf() as a part of this ECB and we 5595 * don't have kernel access (and we're not in the global 5596 * zone), check if the path we cleaned up begins with 5597 * the zone's root path, and trim it off if so. Note 5598 * that this is an output cleanliness issue, not a 5599 * security issue: knowing one's zone root path does 5600 * not enable privilege escalation. 5601 */ 5602 if (strstr(dest, z->zone_rootpath) == dest) 5603 dest += strlen(z->zone_rootpath) - 1; 5604 } 5605 #endif 5606 5607 regs[rd] = (uintptr_t)dest; 5608 mstate->dtms_scratch_ptr += size; 5609 break; 5610 } 5611 5612 case DIF_SUBR_INET_NTOA: 5613 case DIF_SUBR_INET_NTOA6: 5614 case DIF_SUBR_INET_NTOP: { 5615 size_t size; 5616 int af, argi, i; 5617 char *base, *end; 5618 5619 if (subr == DIF_SUBR_INET_NTOP) { 5620 af = (int)tupregs[0].dttk_value; 5621 argi = 1; 5622 } else { 5623 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 5624 argi = 0; 5625 } 5626 5627 if (af == AF_INET) { 5628 ipaddr_t ip4; 5629 uint8_t *ptr8, val; 5630 5631 /* 5632 * Safely load the IPv4 address. 5633 */ 5634 ip4 = dtrace_load32(tupregs[argi].dttk_value); 5635 5636 /* 5637 * Check an IPv4 string will fit in scratch. 5638 */ 5639 size = INET_ADDRSTRLEN; 5640 if (!DTRACE_INSCRATCH(mstate, size)) { 5641 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5642 regs[rd] = 0; 5643 break; 5644 } 5645 base = (char *)mstate->dtms_scratch_ptr; 5646 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5647 5648 /* 5649 * Stringify as a dotted decimal quad. 5650 */ 5651 *end-- = '\0'; 5652 ptr8 = (uint8_t *)&ip4; 5653 for (i = 3; i >= 0; i--) { 5654 val = ptr8[i]; 5655 5656 if (val == 0) { 5657 *end-- = '0'; 5658 } else { 5659 for (; val; val /= 10) { 5660 *end-- = '0' + (val % 10); 5661 } 5662 } 5663 5664 if (i > 0) 5665 *end-- = '.'; 5666 } 5667 ASSERT(end + 1 >= base); 5668 5669 } else if (af == AF_INET6) { 5670 struct in6_addr ip6; 5671 int firstzero, tryzero, numzero, v6end; 5672 uint16_t val; 5673 const char digits[] = "0123456789abcdef"; 5674 5675 /* 5676 * Stringify using RFC 1884 convention 2 - 16 bit 5677 * hexadecimal values with a zero-run compression. 5678 * Lower case hexadecimal digits are used. 5679 * eg, fe80::214:4fff:fe0b:76c8. 5680 * The IPv4 embedded form is returned for inet_ntop, 5681 * just the IPv4 string is returned for inet_ntoa6. 5682 */ 5683 5684 /* 5685 * Safely load the IPv6 address. 5686 */ 5687 dtrace_bcopy( 5688 (void *)(uintptr_t)tupregs[argi].dttk_value, 5689 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 5690 5691 /* 5692 * Check an IPv6 string will fit in scratch. 
5693 */ 5694 size = INET6_ADDRSTRLEN; 5695 if (!DTRACE_INSCRATCH(mstate, size)) { 5696 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5697 regs[rd] = 0; 5698 break; 5699 } 5700 base = (char *)mstate->dtms_scratch_ptr; 5701 end = (char *)mstate->dtms_scratch_ptr + size - 1; 5702 *end-- = '\0'; 5703 5704 /* 5705 * Find the longest run of 16 bit zero values 5706 * for the single allowed zero compression - "::". 5707 */ 5708 firstzero = -1; 5709 tryzero = -1; 5710 numzero = 1; 5711 for (i = 0; i < sizeof (struct in6_addr); i++) { 5712 #ifdef illumos 5713 if (ip6._S6_un._S6_u8[i] == 0 && 5714 #else 5715 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5716 #endif 5717 tryzero == -1 && i % 2 == 0) { 5718 tryzero = i; 5719 continue; 5720 } 5721 5722 if (tryzero != -1 && 5723 #ifdef illumos 5724 (ip6._S6_un._S6_u8[i] != 0 || 5725 #else 5726 (ip6.__u6_addr.__u6_addr8[i] != 0 || 5727 #endif 5728 i == sizeof (struct in6_addr) - 1)) { 5729 5730 if (i - tryzero <= numzero) { 5731 tryzero = -1; 5732 continue; 5733 } 5734 5735 firstzero = tryzero; 5736 numzero = i - i % 2 - tryzero; 5737 tryzero = -1; 5738 5739 #ifdef illumos 5740 if (ip6._S6_un._S6_u8[i] == 0 && 5741 #else 5742 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 5743 #endif 5744 i == sizeof (struct in6_addr) - 1) 5745 numzero += 2; 5746 } 5747 } 5748 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 5749 5750 /* 5751 * Check for an IPv4 embedded address. 5752 */ 5753 v6end = sizeof (struct in6_addr) - 2; 5754 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 5755 IN6_IS_ADDR_V4COMPAT(&ip6)) { 5756 for (i = sizeof (struct in6_addr) - 1; 5757 i >= DTRACE_V4MAPPED_OFFSET; i--) { 5758 ASSERT(end >= base); 5759 5760 #ifdef illumos 5761 val = ip6._S6_un._S6_u8[i]; 5762 #else 5763 val = ip6.__u6_addr.__u6_addr8[i]; 5764 #endif 5765 5766 if (val == 0) { 5767 *end-- = '0'; 5768 } else { 5769 for (; val; val /= 10) { 5770 *end-- = '0' + val % 10; 5771 } 5772 } 5773 5774 if (i > DTRACE_V4MAPPED_OFFSET) 5775 *end-- = '.'; 5776 } 5777 5778 if (subr == DIF_SUBR_INET_NTOA6) 5779 goto inetout; 5780 5781 /* 5782 * Set v6end to skip the IPv4 address that 5783 * we have already stringified. 5784 */ 5785 v6end = 10; 5786 } 5787 5788 /* 5789 * Build the IPv6 string by working through the 5790 * address in reverse. 5791 */ 5792 for (i = v6end; i >= 0; i -= 2) { 5793 ASSERT(end >= base); 5794 5795 if (i == firstzero + numzero - 2) { 5796 *end-- = ':'; 5797 *end-- = ':'; 5798 i -= numzero - 2; 5799 continue; 5800 } 5801 5802 if (i < 14 && i != firstzero - 2) 5803 *end-- = ':'; 5804 5805 #ifdef illumos 5806 val = (ip6._S6_un._S6_u8[i] << 8) + 5807 ip6._S6_un._S6_u8[i + 1]; 5808 #else 5809 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 5810 ip6.__u6_addr.__u6_addr8[i + 1]; 5811 #endif 5812 5813 if (val == 0) { 5814 *end-- = '0'; 5815 } else { 5816 for (; val; val /= 16) { 5817 *end-- = digits[val % 16]; 5818 } 5819 } 5820 } 5821 ASSERT(end + 1 >= base); 5822 5823 } else { 5824 /* 5825 * The user didn't use AF_INET or AF_INET6.
5826 */ 5827 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5828 regs[rd] = 0; 5829 break; 5830 } 5831 5832 inetout: regs[rd] = (uintptr_t)end + 1; 5833 mstate->dtms_scratch_ptr += size; 5834 break; 5835 } 5836 5837 case DIF_SUBR_MEMREF: { 5838 uintptr_t size = 2 * sizeof(uintptr_t); 5839 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5840 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 5841 5842 /* address and length */ 5843 memref[0] = tupregs[0].dttk_value; 5844 memref[1] = tupregs[1].dttk_value; 5845 5846 regs[rd] = (uintptr_t) memref; 5847 mstate->dtms_scratch_ptr += scratch_size; 5848 break; 5849 } 5850 5851 #ifndef illumos 5852 case DIF_SUBR_MEMSTR: { 5853 char *str = (char *)mstate->dtms_scratch_ptr; 5854 uintptr_t mem = tupregs[0].dttk_value; 5855 char c = tupregs[1].dttk_value; 5856 size_t size = tupregs[2].dttk_value; 5857 uint8_t n; 5858 int i; 5859 5860 regs[rd] = 0; 5861 5862 if (size == 0) 5863 break; 5864 5865 if (!dtrace_canload(mem, size - 1, mstate, vstate)) 5866 break; 5867 5868 if (!DTRACE_INSCRATCH(mstate, size)) { 5869 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5870 break; 5871 } 5872 5873 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { 5874 *flags |= CPU_DTRACE_ILLOP; 5875 break; 5876 } 5877 5878 for (i = 0; i < size - 1; i++) { 5879 n = dtrace_load8(mem++); 5880 str[i] = (n == 0) ? c : n; 5881 } 5882 str[size - 1] = 0; 5883 5884 regs[rd] = (uintptr_t)str; 5885 mstate->dtms_scratch_ptr += size; 5886 break; 5887 } 5888 #endif 5889 5890 case DIF_SUBR_TYPEREF: { 5891 uintptr_t size = 4 * sizeof(uintptr_t); 5892 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 5893 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 5894 5895 /* address, num_elements, type_str, type_len */ 5896 typeref[0] = tupregs[0].dttk_value; 5897 typeref[1] = tupregs[1].dttk_value; 5898 typeref[2] = tupregs[2].dttk_value; 5899 typeref[3] = tupregs[3].dttk_value; 5900 5901 regs[rd] = (uintptr_t) typeref; 5902 mstate->dtms_scratch_ptr += scratch_size; 5903 break; 5904 } 5905 } 5906 } 5907 5908 /* 5909 * Emulate the execution of DTrace IR instructions specified by the given 5910 * DIF object. This function is deliberately void of assertions as all of 5911 * the necessary checks are handled by a call to dtrace_difo_validate(). 5912 */ 5913 static uint64_t 5914 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 5915 dtrace_vstate_t *vstate, dtrace_state_t *state) 5916 { 5917 const dif_instr_t *text = difo->dtdo_buf; 5918 const uint_t textlen = difo->dtdo_len; 5919 const char *strtab = difo->dtdo_strtab; 5920 const uint64_t *inttab = difo->dtdo_inttab; 5921 5922 uint64_t rval = 0; 5923 dtrace_statvar_t *svar; 5924 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 5925 dtrace_difv_t *v; 5926 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5927 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 5928 5929 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 5930 uint64_t regs[DIF_DIR_NREGS]; 5931 uint64_t *tmp; 5932 5933 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 5934 int64_t cc_r; 5935 uint_t pc = 0, id, opc = 0; 5936 uint8_t ttop = 0; 5937 dif_instr_t instr; 5938 uint_t r1, r2, rd; 5939 5940 /* 5941 * We stash the current DIF object into the machine state: we need it 5942 * for subsequent access checking. 
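 * (dtrace_canload(), for instance, may consult the stashed DIF
 * object so that loads from the object's own string table can be
 * permitted.)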
5943 */ 5944 mstate->dtms_difo = difo; 5945 5946 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 5947 5948 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 5949 opc = pc; 5950 5951 instr = text[pc++]; 5952 r1 = DIF_INSTR_R1(instr); 5953 r2 = DIF_INSTR_R2(instr); 5954 rd = DIF_INSTR_RD(instr); 5955 5956 switch (DIF_INSTR_OP(instr)) { 5957 case DIF_OP_OR: 5958 regs[rd] = regs[r1] | regs[r2]; 5959 break; 5960 case DIF_OP_XOR: 5961 regs[rd] = regs[r1] ^ regs[r2]; 5962 break; 5963 case DIF_OP_AND: 5964 regs[rd] = regs[r1] & regs[r2]; 5965 break; 5966 case DIF_OP_SLL: 5967 regs[rd] = regs[r1] << regs[r2]; 5968 break; 5969 case DIF_OP_SRL: 5970 regs[rd] = regs[r1] >> regs[r2]; 5971 break; 5972 case DIF_OP_SUB: 5973 regs[rd] = regs[r1] - regs[r2]; 5974 break; 5975 case DIF_OP_ADD: 5976 regs[rd] = regs[r1] + regs[r2]; 5977 break; 5978 case DIF_OP_MUL: 5979 regs[rd] = regs[r1] * regs[r2]; 5980 break; 5981 case DIF_OP_SDIV: 5982 if (regs[r2] == 0) { 5983 regs[rd] = 0; 5984 *flags |= CPU_DTRACE_DIVZERO; 5985 } else { 5986 regs[rd] = (int64_t)regs[r1] / 5987 (int64_t)regs[r2]; 5988 } 5989 break; 5990 5991 case DIF_OP_UDIV: 5992 if (regs[r2] == 0) { 5993 regs[rd] = 0; 5994 *flags |= CPU_DTRACE_DIVZERO; 5995 } else { 5996 regs[rd] = regs[r1] / regs[r2]; 5997 } 5998 break; 5999 6000 case DIF_OP_SREM: 6001 if (regs[r2] == 0) { 6002 regs[rd] = 0; 6003 *flags |= CPU_DTRACE_DIVZERO; 6004 } else { 6005 regs[rd] = (int64_t)regs[r1] % 6006 (int64_t)regs[r2]; 6007 } 6008 break; 6009 6010 case DIF_OP_UREM: 6011 if (regs[r2] == 0) { 6012 regs[rd] = 0; 6013 *flags |= CPU_DTRACE_DIVZERO; 6014 } else { 6015 regs[rd] = regs[r1] % regs[r2]; 6016 } 6017 break; 6018 6019 case DIF_OP_NOT: 6020 regs[rd] = ~regs[r1]; 6021 break; 6022 case DIF_OP_MOV: 6023 regs[rd] = regs[r1]; 6024 break; 6025 case DIF_OP_CMP: 6026 cc_r = regs[r1] - regs[r2]; 6027 cc_n = cc_r < 0; 6028 cc_z = cc_r == 0; 6029 cc_v = 0; 6030 cc_c = regs[r1] < regs[r2]; 6031 break; 6032 case DIF_OP_TST: 6033 cc_n = cc_v = cc_c = 0; 6034 cc_z = regs[r1] == 0; 6035 break; 6036 case DIF_OP_BA: 6037 pc = DIF_INSTR_LABEL(instr); 6038 break; 6039 case DIF_OP_BE: 6040 if (cc_z) 6041 pc = DIF_INSTR_LABEL(instr); 6042 break; 6043 case DIF_OP_BNE: 6044 if (cc_z == 0) 6045 pc = DIF_INSTR_LABEL(instr); 6046 break; 6047 case DIF_OP_BG: 6048 if ((cc_z | (cc_n ^ cc_v)) == 0) 6049 pc = DIF_INSTR_LABEL(instr); 6050 break; 6051 case DIF_OP_BGU: 6052 if ((cc_c | cc_z) == 0) 6053 pc = DIF_INSTR_LABEL(instr); 6054 break; 6055 case DIF_OP_BGE: 6056 if ((cc_n ^ cc_v) == 0) 6057 pc = DIF_INSTR_LABEL(instr); 6058 break; 6059 case DIF_OP_BGEU: 6060 if (cc_c == 0) 6061 pc = DIF_INSTR_LABEL(instr); 6062 break; 6063 case DIF_OP_BL: 6064 if (cc_n ^ cc_v) 6065 pc = DIF_INSTR_LABEL(instr); 6066 break; 6067 case DIF_OP_BLU: 6068 if (cc_c) 6069 pc = DIF_INSTR_LABEL(instr); 6070 break; 6071 case DIF_OP_BLE: 6072 if (cc_z | (cc_n ^ cc_v)) 6073 pc = DIF_INSTR_LABEL(instr); 6074 break; 6075 case DIF_OP_BLEU: 6076 if (cc_c | cc_z) 6077 pc = DIF_INSTR_LABEL(instr); 6078 break; 6079 case DIF_OP_RLDSB: 6080 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6081 break; 6082 /*FALLTHROUGH*/ 6083 case DIF_OP_LDSB: 6084 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 6085 break; 6086 case DIF_OP_RLDSH: 6087 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6088 break; 6089 /*FALLTHROUGH*/ 6090 case DIF_OP_LDSH: 6091 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 6092 break; 6093 case DIF_OP_RLDSW: 6094 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6095 break; 6096 /*FALLTHROUGH*/ 6097 case DIF_OP_LDSW: 
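			/*
			 * (Editor's note on the load opcodes here: each
			 * DIF_OP_RLD* case is the checked variant that vets
			 * the address with dtrace_canload() before falling
			 * through to the corresponding unchecked DIF_OP_LD*
			 * load; a refused load breaks out with regs[rd]
			 * unmodified and the fault state already recorded
			 * by dtrace_canload().)
			 */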
6098 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 6099 break; 6100 case DIF_OP_RLDUB: 6101 if (!dtrace_canload(regs[r1], 1, mstate, vstate)) 6102 break; 6103 /*FALLTHROUGH*/ 6104 case DIF_OP_LDUB: 6105 regs[rd] = dtrace_load8(regs[r1]); 6106 break; 6107 case DIF_OP_RLDUH: 6108 if (!dtrace_canload(regs[r1], 2, mstate, vstate)) 6109 break; 6110 /*FALLTHROUGH*/ 6111 case DIF_OP_LDUH: 6112 regs[rd] = dtrace_load16(regs[r1]); 6113 break; 6114 case DIF_OP_RLDUW: 6115 if (!dtrace_canload(regs[r1], 4, mstate, vstate)) 6116 break; 6117 /*FALLTHROUGH*/ 6118 case DIF_OP_LDUW: 6119 regs[rd] = dtrace_load32(regs[r1]); 6120 break; 6121 case DIF_OP_RLDX: 6122 if (!dtrace_canload(regs[r1], 8, mstate, vstate)) 6123 break; 6124 /*FALLTHROUGH*/ 6125 case DIF_OP_LDX: 6126 regs[rd] = dtrace_load64(regs[r1]); 6127 break; 6128 case DIF_OP_ULDSB: 6129 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6130 regs[rd] = (int8_t) 6131 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6132 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6133 break; 6134 case DIF_OP_ULDSH: 6135 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6136 regs[rd] = (int16_t) 6137 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6138 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6139 break; 6140 case DIF_OP_ULDSW: 6141 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6142 regs[rd] = (int32_t) 6143 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6144 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6145 break; 6146 case DIF_OP_ULDUB: 6147 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6148 regs[rd] = 6149 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 6150 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6151 break; 6152 case DIF_OP_ULDUH: 6153 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6154 regs[rd] = 6155 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 6156 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6157 break; 6158 case DIF_OP_ULDUW: 6159 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6160 regs[rd] = 6161 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 6162 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6163 break; 6164 case DIF_OP_ULDX: 6165 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6166 regs[rd] = 6167 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 6168 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6169 break; 6170 case DIF_OP_RET: 6171 rval = regs[rd]; 6172 pc = textlen; 6173 break; 6174 case DIF_OP_NOP: 6175 break; 6176 case DIF_OP_SETX: 6177 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 6178 break; 6179 case DIF_OP_SETS: 6180 regs[rd] = (uint64_t)(uintptr_t) 6181 (strtab + DIF_INSTR_STRING(instr)); 6182 break; 6183 case DIF_OP_SCMP: { 6184 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 6185 uintptr_t s1 = regs[r1]; 6186 uintptr_t s2 = regs[r2]; 6187 6188 if (s1 != 0 && 6189 !dtrace_strcanload(s1, sz, mstate, vstate)) 6190 break; 6191 if (s2 != 0 && 6192 !dtrace_strcanload(s2, sz, mstate, vstate)) 6193 break; 6194 6195 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 6196 6197 cc_n = cc_r < 0; 6198 cc_z = cc_r == 0; 6199 cc_v = cc_c = 0; 6200 break; 6201 } 6202 case DIF_OP_LDGA: 6203 regs[rd] = dtrace_dif_variable(mstate, state, 6204 r1, regs[r2]); 6205 break; 6206 case DIF_OP_LDGS: 6207 id = DIF_INSTR_VAR(instr); 6208 6209 if (id >= DIF_VAR_OTHER_UBASE) { 6210 uintptr_t a; 6211 6212 id -= DIF_VAR_OTHER_UBASE; 6213 svar = vstate->dtvs_globals[id]; 6214 ASSERT(svar != NULL); 6215 v = &svar->dtsv_var; 6216 6217 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 6218 regs[rd] = svar->dtsv_data; 6219 break; 6220 } 6221 6222 a = (uintptr_t)svar->dtsv_data; 6223 6224 if (*(uint8_t *)a == UINT8_MAX) { 6225 /* 6226 * If the 0th byte is set 
to UINT8_MAX 6227 * then this is to be treated as a 6228 * reference to a NULL variable. 6229 */ 6230 regs[rd] = 0; 6231 } else { 6232 regs[rd] = a + sizeof (uint64_t); 6233 } 6234 6235 break; 6236 } 6237 6238 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 6239 break; 6240 6241 case DIF_OP_STGS: 6242 id = DIF_INSTR_VAR(instr); 6243 6244 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6245 id -= DIF_VAR_OTHER_UBASE; 6246 6247 svar = vstate->dtvs_globals[id]; 6248 ASSERT(svar != NULL); 6249 v = &svar->dtsv_var; 6250 6251 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6252 uintptr_t a = (uintptr_t)svar->dtsv_data; 6253 6254 ASSERT(a != 0); 6255 ASSERT(svar->dtsv_size != 0); 6256 6257 if (regs[rd] == 0) { 6258 *(uint8_t *)a = UINT8_MAX; 6259 break; 6260 } else { 6261 *(uint8_t *)a = 0; 6262 a += sizeof (uint64_t); 6263 } 6264 if (!dtrace_vcanload( 6265 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6266 mstate, vstate)) 6267 break; 6268 6269 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6270 (void *)a, &v->dtdv_type); 6271 break; 6272 } 6273 6274 svar->dtsv_data = regs[rd]; 6275 break; 6276 6277 case DIF_OP_LDTA: 6278 /* 6279 * There are no DTrace built-in thread-local arrays at 6280 * present. This opcode is saved for future work. 6281 */ 6282 *flags |= CPU_DTRACE_ILLOP; 6283 regs[rd] = 0; 6284 break; 6285 6286 case DIF_OP_LDLS: 6287 id = DIF_INSTR_VAR(instr); 6288 6289 if (id < DIF_VAR_OTHER_UBASE) { 6290 /* 6291 * For now, this has no meaning. 6292 */ 6293 regs[rd] = 0; 6294 break; 6295 } 6296 6297 id -= DIF_VAR_OTHER_UBASE; 6298 6299 ASSERT(id < vstate->dtvs_nlocals); 6300 ASSERT(vstate->dtvs_locals != NULL); 6301 6302 svar = vstate->dtvs_locals[id]; 6303 ASSERT(svar != NULL); 6304 v = &svar->dtsv_var; 6305 6306 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6307 uintptr_t a = (uintptr_t)svar->dtsv_data; 6308 size_t sz = v->dtdv_type.dtdt_size; 6309 6310 sz += sizeof (uint64_t); 6311 ASSERT(svar->dtsv_size == NCPU * sz); 6312 a += curcpu * sz; 6313 6314 if (*(uint8_t *)a == UINT8_MAX) { 6315 /* 6316 * If the 0th byte is set to UINT8_MAX 6317 * then this is to be treated as a 6318 * reference to a NULL variable. 
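 *
 * (Editor's note: this NULL encoding is shared by every by-ref
 * static variable -- DIF_OP_STGS and DIF_OP_STLS write UINT8_MAX
 * into the first byte of a reserved leading uint64_t when storing
 * a NULL pointer and clear it otherwise, so the payload always
 * begins sizeof (uint64_t) bytes past dtsv_data.)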
6319 */ 6320 regs[rd] = 0; 6321 } else { 6322 regs[rd] = a + sizeof (uint64_t); 6323 } 6324 6325 break; 6326 } 6327 6328 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6329 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6330 regs[rd] = tmp[curcpu]; 6331 break; 6332 6333 case DIF_OP_STLS: 6334 id = DIF_INSTR_VAR(instr); 6335 6336 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6337 id -= DIF_VAR_OTHER_UBASE; 6338 ASSERT(id < vstate->dtvs_nlocals); 6339 6340 ASSERT(vstate->dtvs_locals != NULL); 6341 svar = vstate->dtvs_locals[id]; 6342 ASSERT(svar != NULL); 6343 v = &svar->dtsv_var; 6344 6345 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6346 uintptr_t a = (uintptr_t)svar->dtsv_data; 6347 size_t sz = v->dtdv_type.dtdt_size; 6348 6349 sz += sizeof (uint64_t); 6350 ASSERT(svar->dtsv_size == NCPU * sz); 6351 a += curcpu * sz; 6352 6353 if (regs[rd] == 0) { 6354 *(uint8_t *)a = UINT8_MAX; 6355 break; 6356 } else { 6357 *(uint8_t *)a = 0; 6358 a += sizeof (uint64_t); 6359 } 6360 6361 if (!dtrace_vcanload( 6362 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6363 mstate, vstate)) 6364 break; 6365 6366 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6367 (void *)a, &v->dtdv_type); 6368 break; 6369 } 6370 6371 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 6372 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 6373 tmp[curcpu] = regs[rd]; 6374 break; 6375 6376 case DIF_OP_LDTS: { 6377 dtrace_dynvar_t *dvar; 6378 dtrace_key_t *key; 6379 6380 id = DIF_INSTR_VAR(instr); 6381 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6382 id -= DIF_VAR_OTHER_UBASE; 6383 v = &vstate->dtvs_tlocals[id]; 6384 6385 key = &tupregs[DIF_DTR_NREGS]; 6386 key[0].dttk_value = (uint64_t)id; 6387 key[0].dttk_size = 0; 6388 DTRACE_TLS_THRKEY(key[1].dttk_value); 6389 key[1].dttk_size = 0; 6390 6391 dvar = dtrace_dynvar(dstate, 2, key, 6392 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 6393 mstate, vstate); 6394 6395 if (dvar == NULL) { 6396 regs[rd] = 0; 6397 break; 6398 } 6399 6400 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6401 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6402 } else { 6403 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6404 } 6405 6406 break; 6407 } 6408 6409 case DIF_OP_STTS: { 6410 dtrace_dynvar_t *dvar; 6411 dtrace_key_t *key; 6412 6413 id = DIF_INSTR_VAR(instr); 6414 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6415 id -= DIF_VAR_OTHER_UBASE; 6416 6417 key = &tupregs[DIF_DTR_NREGS]; 6418 key[0].dttk_value = (uint64_t)id; 6419 key[0].dttk_size = 0; 6420 DTRACE_TLS_THRKEY(key[1].dttk_value); 6421 key[1].dttk_size = 0; 6422 v = &vstate->dtvs_tlocals[id]; 6423 6424 dvar = dtrace_dynvar(dstate, 2, key, 6425 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6426 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6427 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6428 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6429 6430 /* 6431 * Given that we're storing to thread-local data, 6432 * we need to flush our predicate cache. 
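 *
 * (Editor's note: dtrace_probe() consults curthread->t_predcache to
 * skip an ECB whose cacheable predicate last evaluated to false for
 * this thread; a store to a thread-local variable may change that
 * verdict, so the cached predicate ID is zeroed here.)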
6433 */ 6434 curthread->t_predcache = 0; 6435 6436 if (dvar == NULL) 6437 break; 6438 6439 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6440 if (!dtrace_vcanload( 6441 (void *)(uintptr_t)regs[rd], 6442 &v->dtdv_type, mstate, vstate)) 6443 break; 6444 6445 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6446 dvar->dtdv_data, &v->dtdv_type); 6447 } else { 6448 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6449 } 6450 6451 break; 6452 } 6453 6454 case DIF_OP_SRA: 6455 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 6456 break; 6457 6458 case DIF_OP_CALL: 6459 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 6460 regs, tupregs, ttop, mstate, state); 6461 break; 6462 6463 case DIF_OP_PUSHTR: 6464 if (ttop == DIF_DTR_NREGS) { 6465 *flags |= CPU_DTRACE_TUPOFLOW; 6466 break; 6467 } 6468 6469 if (r1 == DIF_TYPE_STRING) { 6470 /* 6471 * If this is a string type and the size is 0, 6472 * we'll use the system-wide default string 6473 * size. Note that we are _not_ looking at 6474 * the value of the DTRACEOPT_STRSIZE option; 6475 * had this been set, we would expect to have 6476 * a non-zero size value in the "pushtr". 6477 */ 6478 tupregs[ttop].dttk_size = 6479 dtrace_strlen((char *)(uintptr_t)regs[rd], 6480 regs[r2] ? regs[r2] : 6481 dtrace_strsize_default) + 1; 6482 } else { 6483 if (regs[r2] > LONG_MAX) { 6484 *flags |= CPU_DTRACE_ILLOP; 6485 break; 6486 } 6487 6488 tupregs[ttop].dttk_size = regs[r2]; 6489 } 6490 6491 tupregs[ttop++].dttk_value = regs[rd]; 6492 break; 6493 6494 case DIF_OP_PUSHTV: 6495 if (ttop == DIF_DTR_NREGS) { 6496 *flags |= CPU_DTRACE_TUPOFLOW; 6497 break; 6498 } 6499 6500 tupregs[ttop].dttk_value = regs[rd]; 6501 tupregs[ttop++].dttk_size = 0; 6502 break; 6503 6504 case DIF_OP_POPTS: 6505 if (ttop != 0) 6506 ttop--; 6507 break; 6508 6509 case DIF_OP_FLUSHTS: 6510 ttop = 0; 6511 break; 6512 6513 case DIF_OP_LDGAA: 6514 case DIF_OP_LDTAA: { 6515 dtrace_dynvar_t *dvar; 6516 dtrace_key_t *key = tupregs; 6517 uint_t nkeys = ttop; 6518 6519 id = DIF_INSTR_VAR(instr); 6520 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6521 id -= DIF_VAR_OTHER_UBASE; 6522 6523 key[nkeys].dttk_value = (uint64_t)id; 6524 key[nkeys++].dttk_size = 0; 6525 6526 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 6527 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6528 key[nkeys++].dttk_size = 0; 6529 v = &vstate->dtvs_tlocals[id]; 6530 } else { 6531 v = &vstate->dtvs_globals[id]->dtsv_var; 6532 } 6533 6534 dvar = dtrace_dynvar(dstate, nkeys, key, 6535 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 6536 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6537 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 6538 6539 if (dvar == NULL) { 6540 regs[rd] = 0; 6541 break; 6542 } 6543 6544 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6545 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 6546 } else { 6547 regs[rd] = *((uint64_t *)dvar->dtdv_data); 6548 } 6549 6550 break; 6551 } 6552 6553 case DIF_OP_STGAA: 6554 case DIF_OP_STTAA: { 6555 dtrace_dynvar_t *dvar; 6556 dtrace_key_t *key = tupregs; 6557 uint_t nkeys = ttop; 6558 6559 id = DIF_INSTR_VAR(instr); 6560 ASSERT(id >= DIF_VAR_OTHER_UBASE); 6561 id -= DIF_VAR_OTHER_UBASE; 6562 6563 key[nkeys].dttk_value = (uint64_t)id; 6564 key[nkeys++].dttk_size = 0; 6565 6566 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 6567 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 6568 key[nkeys++].dttk_size = 0; 6569 v = &vstate->dtvs_tlocals[id]; 6570 } else { 6571 v = &vstate->dtvs_globals[id]->dtsv_var; 6572 } 6573 6574 dvar = dtrace_dynvar(dstate, nkeys, key, 6575 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
6576 v->dtdv_type.dtdt_size : sizeof (uint64_t), 6577 regs[rd] ? DTRACE_DYNVAR_ALLOC : 6578 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 6579 6580 if (dvar == NULL) 6581 break; 6582 6583 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 6584 if (!dtrace_vcanload( 6585 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 6586 mstate, vstate)) 6587 break; 6588 6589 dtrace_vcopy((void *)(uintptr_t)regs[rd], 6590 dvar->dtdv_data, &v->dtdv_type); 6591 } else { 6592 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 6593 } 6594 6595 break; 6596 } 6597 6598 case DIF_OP_ALLOCS: { 6599 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6600 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 6601 6602 /* 6603 * Rounding up the user allocation size could have 6604 * overflowed large, bogus allocations (like -1ULL) to 6605 * 0. 6606 */ 6607 if (size < regs[r1] || 6608 !DTRACE_INSCRATCH(mstate, size)) { 6609 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6610 regs[rd] = 0; 6611 break; 6612 } 6613 6614 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 6615 mstate->dtms_scratch_ptr += size; 6616 regs[rd] = ptr; 6617 break; 6618 } 6619 6620 case DIF_OP_COPYS: 6621 if (!dtrace_canstore(regs[rd], regs[r2], 6622 mstate, vstate)) { 6623 *flags |= CPU_DTRACE_BADADDR; 6624 *illval = regs[rd]; 6625 break; 6626 } 6627 6628 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 6629 break; 6630 6631 dtrace_bcopy((void *)(uintptr_t)regs[r1], 6632 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 6633 break; 6634 6635 case DIF_OP_STB: 6636 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 6637 *flags |= CPU_DTRACE_BADADDR; 6638 *illval = regs[rd]; 6639 break; 6640 } 6641 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 6642 break; 6643 6644 case DIF_OP_STH: 6645 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 6646 *flags |= CPU_DTRACE_BADADDR; 6647 *illval = regs[rd]; 6648 break; 6649 } 6650 if (regs[rd] & 1) { 6651 *flags |= CPU_DTRACE_BADALIGN; 6652 *illval = regs[rd]; 6653 break; 6654 } 6655 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 6656 break; 6657 6658 case DIF_OP_STW: 6659 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 6660 *flags |= CPU_DTRACE_BADADDR; 6661 *illval = regs[rd]; 6662 break; 6663 } 6664 if (regs[rd] & 3) { 6665 *flags |= CPU_DTRACE_BADALIGN; 6666 *illval = regs[rd]; 6667 break; 6668 } 6669 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 6670 break; 6671 6672 case DIF_OP_STX: 6673 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 6674 *flags |= CPU_DTRACE_BADADDR; 6675 *illval = regs[rd]; 6676 break; 6677 } 6678 if (regs[rd] & 7) { 6679 *flags |= CPU_DTRACE_BADALIGN; 6680 *illval = regs[rd]; 6681 break; 6682 } 6683 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 6684 break; 6685 } 6686 } 6687 6688 if (!(*flags & CPU_DTRACE_FAULT)) 6689 return (rval); 6690 6691 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 6692 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 6693 6694 return (0); 6695 } 6696 6697 static void 6698 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 6699 { 6700 dtrace_probe_t *probe = ecb->dte_probe; 6701 dtrace_provider_t *prov = probe->dtpr_provider; 6702 char c[DTRACE_FULLNAMELEN + 80], *str; 6703 char *msg = "dtrace: breakpoint action at probe "; 6704 char *ecbmsg = " (ecb "; 6705 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 6706 uintptr_t val = (uintptr_t)ecb; 6707 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 6708 6709 if (dtrace_destructive_disallow) 6710 return; 6711 6712 /* 6713 * It's impossible to be taking action on the NULL 
probe. 6714 */ 6715 ASSERT(probe != NULL); 6716 6717 /* 6718 * This is a poor man's (destitute man's?) sprintf(): we want to 6719 * print the provider name, module name, function name and name of 6720 * the probe, along with the hex address of the ECB with the breakpoint 6721 * action -- all of which we must place in the character buffer by 6722 * hand. 6723 */ 6724 while (*msg != '\0') 6725 c[i++] = *msg++; 6726 6727 for (str = prov->dtpv_name; *str != '\0'; str++) 6728 c[i++] = *str; 6729 c[i++] = ':'; 6730 6731 for (str = probe->dtpr_mod; *str != '\0'; str++) 6732 c[i++] = *str; 6733 c[i++] = ':'; 6734 6735 for (str = probe->dtpr_func; *str != '\0'; str++) 6736 c[i++] = *str; 6737 c[i++] = ':'; 6738 6739 for (str = probe->dtpr_name; *str != '\0'; str++) 6740 c[i++] = *str; 6741 6742 while (*ecbmsg != '\0') 6743 c[i++] = *ecbmsg++; 6744 6745 while (shift >= 0) { 6746 mask = (uintptr_t)0xf << shift; 6747 6748 if (val >= ((uintptr_t)1 << shift)) 6749 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 6750 shift -= 4; 6751 } 6752 6753 c[i++] = ')'; 6754 c[i] = '\0'; 6755 6756 #ifdef illumos 6757 debug_enter(c); 6758 #else 6759 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 6760 #endif 6761 } 6762 6763 static void 6764 dtrace_action_panic(dtrace_ecb_t *ecb) 6765 { 6766 dtrace_probe_t *probe = ecb->dte_probe; 6767 6768 /* 6769 * It's impossible to be taking action on the NULL probe. 6770 */ 6771 ASSERT(probe != NULL); 6772 6773 if (dtrace_destructive_disallow) 6774 return; 6775 6776 if (dtrace_panicked != NULL) 6777 return; 6778 6779 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 6780 return; 6781 6782 /* 6783 * We won the right to panic. (We want to be sure that only one 6784 * thread calls panic() from dtrace_probe(), and that panic() is 6785 * called exactly once.) 6786 */ 6787 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 6788 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 6789 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 6790 } 6791 6792 static void 6793 dtrace_action_raise(uint64_t sig) 6794 { 6795 if (dtrace_destructive_disallow) 6796 return; 6797 6798 if (sig >= NSIG) { 6799 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6800 return; 6801 } 6802 6803 #ifdef illumos 6804 /* 6805 * raise() has a queue depth of 1 -- we ignore all subsequent 6806 * invocations of the raise() action. 
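 *
 * (Editor's note: on illumos the signal is merely latched in
 * t_dtrace_sig here and delivered at the thread's next return to
 * user mode, which is what t_sig_check and aston() arrange; the
 * FreeBSD path below instead posts the signal immediately with
 * kern_psignal() under the process lock.)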
6807 */ 6808 if (curthread->t_dtrace_sig == 0) 6809 curthread->t_dtrace_sig = (uint8_t)sig; 6810 6811 curthread->t_sig_check = 1; 6812 aston(curthread); 6813 #else 6814 struct proc *p = curproc; 6815 PROC_LOCK(p); 6816 kern_psignal(p, sig); 6817 PROC_UNLOCK(p); 6818 #endif 6819 } 6820 6821 static void 6822 dtrace_action_stop(void) 6823 { 6824 if (dtrace_destructive_disallow) 6825 return; 6826 6827 #ifdef illumos 6828 if (!curthread->t_dtrace_stop) { 6829 curthread->t_dtrace_stop = 1; 6830 curthread->t_sig_check = 1; 6831 aston(curthread); 6832 } 6833 #else 6834 struct proc *p = curproc; 6835 PROC_LOCK(p); 6836 kern_psignal(p, SIGSTOP); 6837 PROC_UNLOCK(p); 6838 #endif 6839 } 6840 6841 static void 6842 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 6843 { 6844 hrtime_t now; 6845 volatile uint16_t *flags; 6846 #ifdef illumos 6847 cpu_t *cpu = CPU; 6848 #else 6849 cpu_t *cpu = &solaris_cpu[curcpu]; 6850 #endif 6851 6852 if (dtrace_destructive_disallow) 6853 return; 6854 6855 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 6856 6857 now = dtrace_gethrtime(); 6858 6859 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 6860 /* 6861 * We need to advance the mark to the current time. 6862 */ 6863 cpu->cpu_dtrace_chillmark = now; 6864 cpu->cpu_dtrace_chilled = 0; 6865 } 6866 6867 /* 6868 * Now check to see if the requested chill time would take us over 6869 * the maximum amount of time allowed in the chill interval. (Or 6870 * worse, if the calculation itself induces overflow.) 6871 */ 6872 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 6873 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 6874 *flags |= CPU_DTRACE_ILLOP; 6875 return; 6876 } 6877 6878 while (dtrace_gethrtime() - now < val) 6879 continue; 6880 6881 /* 6882 * Normally, we assure that the value of the variable "timestamp" does 6883 * not change within an ECB. The presence of chill() represents an 6884 * exception to this rule, however. 6885 */ 6886 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 6887 cpu->cpu_dtrace_chilled += val; 6888 } 6889 6890 static void 6891 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 6892 uint64_t *buf, uint64_t arg) 6893 { 6894 int nframes = DTRACE_USTACK_NFRAMES(arg); 6895 int strsize = DTRACE_USTACK_STRSIZE(arg); 6896 uint64_t *pcs = &buf[1], *fps; 6897 char *str = (char *)&pcs[nframes]; 6898 int size, offs = 0, i, j; 6899 uintptr_t old = mstate->dtms_scratch_ptr, saved; 6900 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 6901 char *sym; 6902 6903 /* 6904 * Should be taking a faster path if string space has not been 6905 * allocated. 6906 */ 6907 ASSERT(strsize != 0); 6908 6909 /* 6910 * We will first allocate some temporary space for the frame pointers. 6911 */ 6912 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 6913 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 6914 (nframes * sizeof (uint64_t)); 6915 6916 if (!DTRACE_INSCRATCH(mstate, size)) { 6917 /* 6918 * Not enough room for our frame pointers -- need to indicate 6919 * that we ran out of scratch space. 6920 */ 6921 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 6922 return; 6923 } 6924 6925 mstate->dtms_scratch_ptr += size; 6926 saved = mstate->dtms_scratch_ptr; 6927 6928 /* 6929 * Now get a stack with both program counters and frame pointers. 6930 */ 6931 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6932 dtrace_getufpstack(buf, fps, nframes + 1); 6933 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6934 6935 /* 6936 * If that faulted, we're cooked. 
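 *
 * (That is: dtrace_getufpstack() ran under CPU_DTRACE_NOFAULT, so a
 * bad user address shows up as a fault bit in the per-CPU flags
 * rather than as a trap; all we can do is restore the saved scratch
 * pointer at "out" and return.)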
6937 */ 6938 if (*flags & CPU_DTRACE_FAULT) 6939 goto out; 6940 6941 /* 6942 * Now we want to walk up the stack, calling the USTACK helper. For 6943 * each iteration, we restore the scratch pointer. 6944 */ 6945 for (i = 0; i < nframes; i++) { 6946 mstate->dtms_scratch_ptr = saved; 6947 6948 if (offs >= strsize) 6949 break; 6950 6951 sym = (char *)(uintptr_t)dtrace_helper( 6952 DTRACE_HELPER_ACTION_USTACK, 6953 mstate, state, pcs[i], fps[i]); 6954 6955 /* 6956 * If we faulted while running the helper, we're going to 6957 * clear the fault and null out the corresponding string. 6958 */ 6959 if (*flags & CPU_DTRACE_FAULT) { 6960 *flags &= ~CPU_DTRACE_FAULT; 6961 str[offs++] = '\0'; 6962 continue; 6963 } 6964 6965 if (sym == NULL) { 6966 str[offs++] = '\0'; 6967 continue; 6968 } 6969 6970 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6971 6972 /* 6973 * Now copy in the string that the helper returned to us. 6974 */ 6975 for (j = 0; offs + j < strsize; j++) { 6976 if ((str[offs + j] = sym[j]) == '\0') 6977 break; 6978 } 6979 6980 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6981 6982 offs += j + 1; 6983 } 6984 6985 if (offs >= strsize) { 6986 /* 6987 * If we didn't have room for all of the strings, we don't 6988 * abort processing -- this needn't be a fatal error -- but we 6989 * still want to increment a counter (dts_stkstroverflows) to 6990 * allow this condition to be warned about. (If this is from 6991 * a jstack() action, it is easily tuned via jstackstrsize.) 6992 */ 6993 dtrace_error(&state->dts_stkstroverflows); 6994 } 6995 6996 while (offs < strsize) 6997 str[offs++] = '\0'; 6998 6999 out: 7000 mstate->dtms_scratch_ptr = old; 7001 } 7002 7003 static void 7004 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, 7005 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) 7006 { 7007 volatile uint16_t *flags; 7008 uint64_t val = *valp; 7009 size_t valoffs = *valoffsp; 7010 7011 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; 7012 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); 7013 7014 /* 7015 * If this is a string, we're going to only load until we find the zero 7016 * byte -- after which we'll store zero bytes. 7017 */ 7018 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 7019 char c = '\0' + 1; 7020 size_t s; 7021 7022 for (s = 0; s < size; s++) { 7023 if (c != '\0' && dtkind == DIF_TF_BYREF) { 7024 c = dtrace_load8(val++); 7025 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { 7026 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7027 c = dtrace_fuword8((void *)(uintptr_t)val++); 7028 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7029 if (*flags & CPU_DTRACE_FAULT) 7030 break; 7031 } 7032 7033 DTRACE_STORE(uint8_t, tomax, valoffs++, c); 7034 7035 if (c == '\0' && intuple) 7036 break; 7037 } 7038 } else { 7039 uint8_t c; 7040 while (valoffs < end) { 7041 if (dtkind == DIF_TF_BYREF) { 7042 c = dtrace_load8(val++); 7043 } else if (dtkind == DIF_TF_BYUREF) { 7044 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7045 c = dtrace_fuword8((void *)(uintptr_t)val++); 7046 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7047 if (*flags & CPU_DTRACE_FAULT) 7048 break; 7049 } 7050 7051 DTRACE_STORE(uint8_t, tomax, 7052 valoffs++, c); 7053 } 7054 } 7055 7056 *valp = val; 7057 *valoffsp = valoffs; 7058 } 7059 7060 /* 7061 * If you're looking for the epicenter of DTrace, you just found it. This 7062 * is the function called by the provider to fire a probe -- from which all 7063 * subsequent probe-context DTrace activity emanates. 
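 *
 * (Editor's sketch of a call site -- hypothetical provider code, not
 * from this file: a provider fires a probe that it registered earlier
 * with dtrace_probe_create() by passing that probe's id and up to
 * five arguments, e.g.
 *
 *	if (my_probe_id != DTRACE_IDNONE)
 *		dtrace_probe(my_probe_id, (uintptr_t)sp,
 *		    (uintptr_t)len, 0, 0, 0);
 *
 * where my_probe_id, sp and len stand in for the provider's own
 * state.)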
7064 */ 7065 void 7066 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 7067 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 7068 { 7069 processorid_t cpuid; 7070 dtrace_icookie_t cookie; 7071 dtrace_probe_t *probe; 7072 dtrace_mstate_t mstate; 7073 dtrace_ecb_t *ecb; 7074 dtrace_action_t *act; 7075 intptr_t offs; 7076 size_t size; 7077 int vtime, onintr; 7078 volatile uint16_t *flags; 7079 hrtime_t now; 7080 7081 if (panicstr != NULL) 7082 return; 7083 7084 #ifdef illumos 7085 /* 7086 * Kick out immediately if this CPU is still being born (in which case 7087 * curthread will be set to -1) or the current thread can't allow 7088 * probes in its current context. 7089 */ 7090 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 7091 return; 7092 #endif 7093 7094 cookie = dtrace_interrupt_disable(); 7095 probe = dtrace_probes[id - 1]; 7096 cpuid = curcpu; 7097 onintr = CPU_ON_INTR(CPU); 7098 7099 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 7100 probe->dtpr_predcache == curthread->t_predcache) { 7101 /* 7102 * We have hit in the predicate cache; we know that 7103 * this predicate would evaluate to be false. 7104 */ 7105 dtrace_interrupt_enable(cookie); 7106 return; 7107 } 7108 7109 #ifdef illumos 7110 if (panic_quiesce) { 7111 #else 7112 if (panicstr != NULL) { 7113 #endif 7114 /* 7115 * We don't trace anything if we're panicking. 7116 */ 7117 dtrace_interrupt_enable(cookie); 7118 return; 7119 } 7120 7121 now = mstate.dtms_timestamp = dtrace_gethrtime(); 7122 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7123 vtime = dtrace_vtime_references != 0; 7124 7125 if (vtime && curthread->t_dtrace_start) 7126 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 7127 7128 mstate.dtms_difo = NULL; 7129 mstate.dtms_probe = probe; 7130 mstate.dtms_strtok = 0; 7131 mstate.dtms_arg[0] = arg0; 7132 mstate.dtms_arg[1] = arg1; 7133 mstate.dtms_arg[2] = arg2; 7134 mstate.dtms_arg[3] = arg3; 7135 mstate.dtms_arg[4] = arg4; 7136 7137 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 7138 7139 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 7140 dtrace_predicate_t *pred = ecb->dte_predicate; 7141 dtrace_state_t *state = ecb->dte_state; 7142 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 7143 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 7144 dtrace_vstate_t *vstate = &state->dts_vstate; 7145 dtrace_provider_t *prov = probe->dtpr_provider; 7146 uint64_t tracememsize = 0; 7147 int committed = 0; 7148 caddr_t tomax; 7149 7150 /* 7151 * A little subtlety with the following (seemingly innocuous) 7152 * declaration of the automatic 'val': by looking at the 7153 * code, you might think that it could be declared in the 7154 * action processing loop, below. (That is, it's only used in 7155 * the action processing loop.) However, it must be declared 7156 * out of that scope because in the case of DIF expression 7157 * arguments to aggregating actions, one iteration of the 7158 * action loop will use the last iteration's value. 7159 */ 7160 uint64_t val = 0; 7161 7162 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 7163 mstate.dtms_getf = NULL; 7164 7165 *flags &= ~CPU_DTRACE_ERROR; 7166 7167 if (prov == dtrace_provider) { 7168 /* 7169 * If dtrace itself is the provider of this probe, 7170 * we're only going to continue processing the ECB if 7171 * arg0 (the dtrace_state_t) is equal to the ECB's 7172 * creating state. (This prevents disjoint consumers 7173 * from seeing one another's metaprobes.) 
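 *
 * (The dtrace provider fires BEGIN, END and ERROR with the creating
 * dtrace_state_t as arg0, which is what makes this per-consumer
 * filtering possible.)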
7174 */ 7175 if (arg0 != (uint64_t)(uintptr_t)state) 7176 continue; 7177 } 7178 7179 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 7180 /* 7181 * We're not currently active. If our provider isn't 7182 * the dtrace pseudo provider, we're not interested. 7183 */ 7184 if (prov != dtrace_provider) 7185 continue; 7186 7187 /* 7188 * Now we must further check if we are in the BEGIN 7189 * probe. If we are, we will only continue processing 7190 * if we're still in WARMUP -- if one BEGIN enabling 7191 * has invoked the exit() action, we don't want to 7192 * evaluate subsequent BEGIN enablings. 7193 */ 7194 if (probe->dtpr_id == dtrace_probeid_begin && 7195 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 7196 ASSERT(state->dts_activity == 7197 DTRACE_ACTIVITY_DRAINING); 7198 continue; 7199 } 7200 } 7201 7202 if (ecb->dte_cond) { 7203 /* 7204 * If the dte_cond bits indicate that this 7205 * consumer is only allowed to see user-mode firings 7206 * of this probe, call the provider's dtps_usermode() 7207 * entry point to check that the probe was fired 7208 * while in a user context. Skip this ECB if that's 7209 * not the case. 7210 */ 7211 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 7212 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 7213 probe->dtpr_id, probe->dtpr_arg) == 0) 7214 continue; 7215 7216 #ifdef illumos 7217 /* 7218 * This is more subtle than it looks. We have to be 7219 * absolutely certain that CRED() isn't going to 7220 * change out from under us so it's only legit to 7221 * examine that structure if we're in constrained 7222 * situations. Currently, the only time we'll make 7223 * this check is when a non-super-user has enabled the 7224 * profile or syscall providers -- providers that 7225 * allow visibility of all processes. For the 7226 * profile case, the check above will ensure that 7227 * we're examining a user context. 7228 */ 7229 if (ecb->dte_cond & DTRACE_COND_OWNER) { 7230 cred_t *cr; 7231 cred_t *s_cr = 7232 ecb->dte_state->dts_cred.dcr_cred; 7233 proc_t *proc; 7234 7235 ASSERT(s_cr != NULL); 7236 7237 if ((cr = CRED()) == NULL || 7238 s_cr->cr_uid != cr->cr_uid || 7239 s_cr->cr_uid != cr->cr_ruid || 7240 s_cr->cr_uid != cr->cr_suid || 7241 s_cr->cr_gid != cr->cr_gid || 7242 s_cr->cr_gid != cr->cr_rgid || 7243 s_cr->cr_gid != cr->cr_sgid || 7244 (proc = ttoproc(curthread)) == NULL || 7245 (proc->p_flag & SNOCD)) 7246 continue; 7247 } 7248 7249 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 7250 cred_t *cr; 7251 cred_t *s_cr = 7252 ecb->dte_state->dts_cred.dcr_cred; 7253 7254 ASSERT(s_cr != NULL); 7255 7256 if ((cr = CRED()) == NULL || 7257 s_cr->cr_zone->zone_id != 7258 cr->cr_zone->zone_id) 7259 continue; 7260 } 7261 #endif 7262 } 7263 7264 if (now - state->dts_alive > dtrace_deadman_timeout) { 7265 /* 7266 * We seem to be dead. Unless we (a) have kernel 7267 * destructive permissions, (b) have explicitly enabled 7268 * destructive actions, and (c) destructive actions have 7269 * not been disabled, we're going to transition into 7270 * the KILLED state, from which no further processing 7271 * on this state will be performed. 
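 *
 * (The dtrace_cas32() loop below is the usual lock-free idiom:
 * re-read dts_activity and retry until the swap to
 * DTRACE_ACTIVITY_KILLED takes, regardless of which states any
 * racing CPUs move us through in the meantime.)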
7272 */ 7273 if (!dtrace_priv_kernel_destructive(state) || 7274 !state->dts_cred.dcr_destructive || 7275 dtrace_destructive_disallow) { 7276 void *activity = &state->dts_activity; 7277 dtrace_activity_t current; 7278 7279 do { 7280 current = state->dts_activity; 7281 } while (dtrace_cas32(activity, current, 7282 DTRACE_ACTIVITY_KILLED) != current); 7283 7284 continue; 7285 } 7286 } 7287 7288 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 7289 ecb->dte_alignment, state, &mstate)) < 0) 7290 continue; 7291 7292 tomax = buf->dtb_tomax; 7293 ASSERT(tomax != NULL); 7294 7295 if (ecb->dte_size != 0) { 7296 dtrace_rechdr_t dtrh; 7297 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 7298 mstate.dtms_timestamp = dtrace_gethrtime(); 7299 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 7300 } 7301 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 7302 dtrh.dtrh_epid = ecb->dte_epid; 7303 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 7304 mstate.dtms_timestamp); 7305 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 7306 } 7307 7308 mstate.dtms_epid = ecb->dte_epid; 7309 mstate.dtms_present |= DTRACE_MSTATE_EPID; 7310 7311 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 7312 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 7313 else 7314 mstate.dtms_access = 0; 7315 7316 if (pred != NULL) { 7317 dtrace_difo_t *dp = pred->dtp_difo; 7318 int rval; 7319 7320 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 7321 7322 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 7323 dtrace_cacheid_t cid = probe->dtpr_predcache; 7324 7325 if (cid != DTRACE_CACHEIDNONE && !onintr) { 7326 /* 7327 * Update the predicate cache... 7328 */ 7329 ASSERT(cid == pred->dtp_cacheid); 7330 curthread->t_predcache = cid; 7331 } 7332 7333 continue; 7334 } 7335 } 7336 7337 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 7338 act != NULL; act = act->dta_next) { 7339 size_t valoffs; 7340 dtrace_difo_t *dp; 7341 dtrace_recdesc_t *rec = &act->dta_rec; 7342 7343 size = rec->dtrd_size; 7344 valoffs = offs + rec->dtrd_offset; 7345 7346 if (DTRACEACT_ISAGG(act->dta_kind)) { 7347 uint64_t v = 0xbad; 7348 dtrace_aggregation_t *agg; 7349 7350 agg = (dtrace_aggregation_t *)act; 7351 7352 if ((dp = act->dta_difo) != NULL) 7353 v = dtrace_dif_emulate(dp, 7354 &mstate, vstate, state); 7355 7356 if (*flags & CPU_DTRACE_ERROR) 7357 continue; 7358 7359 /* 7360 * Note that we always pass the expression 7361 * value from the previous iteration of the 7362 * action loop. This value will only be used 7363 * if there is an expression argument to the 7364 * aggregating action, denoted by the 7365 * dtag_hasarg field. 7366 */ 7367 dtrace_aggregate(agg, buf, 7368 offs, aggbuf, v, val); 7369 continue; 7370 } 7371 7372 switch (act->dta_kind) { 7373 case DTRACEACT_STOP: 7374 if (dtrace_priv_proc_destructive(state)) 7375 dtrace_action_stop(); 7376 continue; 7377 7378 case DTRACEACT_BREAKPOINT: 7379 if (dtrace_priv_kernel_destructive(state)) 7380 dtrace_action_breakpoint(ecb); 7381 continue; 7382 7383 case DTRACEACT_PANIC: 7384 if (dtrace_priv_kernel_destructive(state)) 7385 dtrace_action_panic(ecb); 7386 continue; 7387 7388 case DTRACEACT_STACK: 7389 if (!dtrace_priv_kernel(state)) 7390 continue; 7391 7392 dtrace_getpcstack((pc_t *)(tomax + valoffs), 7393 size / sizeof (pc_t), probe->dtpr_aframes, 7394 DTRACE_ANCHORED(probe) ? NULL : 7395 (uint32_t *)arg0); 7396 continue; 7397 7398 case DTRACEACT_JSTACK: 7399 case DTRACEACT_USTACK: 7400 if (!dtrace_priv_proc(state)) 7401 continue; 7402 7403 /* 7404 * See comment in DIF_VAR_PID. 
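 *
 * (Briefly: an anchored probe can fire on an interrupt thread with
 * no meaningful user context, so rather than walk an arbitrary
 * process's stack we emit a zero-filled frame and string region
 * below.)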
7405 */ 7406 if (DTRACE_ANCHORED(mstate.dtms_probe) && 7407 CPU_ON_INTR(CPU)) { 7408 int depth = DTRACE_USTACK_NFRAMES( 7409 rec->dtrd_arg) + 1; 7410 7411 dtrace_bzero((void *)(tomax + valoffs), 7412 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 7413 + depth * sizeof (uint64_t)); 7414 7415 continue; 7416 } 7417 7418 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 7419 curproc->p_dtrace_helpers != NULL) { 7420 /* 7421 * This is the slow path -- we have 7422 * allocated string space, and we're 7423 * getting the stack of a process that 7424 * has helpers. Call into a separate 7425 * routine to perform this processing. 7426 */ 7427 dtrace_action_ustack(&mstate, state, 7428 (uint64_t *)(tomax + valoffs), 7429 rec->dtrd_arg); 7430 continue; 7431 } 7432 7433 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 7434 dtrace_getupcstack((uint64_t *) 7435 (tomax + valoffs), 7436 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 7437 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 7438 continue; 7439 7440 default: 7441 break; 7442 } 7443 7444 dp = act->dta_difo; 7445 ASSERT(dp != NULL); 7446 7447 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 7448 7449 if (*flags & CPU_DTRACE_ERROR) 7450 continue; 7451 7452 switch (act->dta_kind) { 7453 case DTRACEACT_SPECULATE: { 7454 dtrace_rechdr_t *dtrh; 7455 7456 ASSERT(buf == &state->dts_buffer[cpuid]); 7457 buf = dtrace_speculation_buffer(state, 7458 cpuid, val); 7459 7460 if (buf == NULL) { 7461 *flags |= CPU_DTRACE_DROP; 7462 continue; 7463 } 7464 7465 offs = dtrace_buffer_reserve(buf, 7466 ecb->dte_needed, ecb->dte_alignment, 7467 state, NULL); 7468 7469 if (offs < 0) { 7470 *flags |= CPU_DTRACE_DROP; 7471 continue; 7472 } 7473 7474 tomax = buf->dtb_tomax; 7475 ASSERT(tomax != NULL); 7476 7477 if (ecb->dte_size == 0) 7478 continue; 7479 7480 ASSERT3U(ecb->dte_size, >=, 7481 sizeof (dtrace_rechdr_t)); 7482 dtrh = ((void *)(tomax + offs)); 7483 dtrh->dtrh_epid = ecb->dte_epid; 7484 /* 7485 * When the speculation is committed, all of 7486 * the records in the speculative buffer will 7487 * have their timestamps set to the commit 7488 * time. Until then, it is set to a sentinel 7489 * value, for debugability. 7490 */ 7491 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 7492 continue; 7493 } 7494 7495 case DTRACEACT_PRINTM: { 7496 /* The DIF returns a 'memref'. */ 7497 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 7498 7499 /* Get the size from the memref. */ 7500 size = memref[1]; 7501 7502 /* 7503 * Check if the size exceeds the allocated 7504 * buffer size. 7505 */ 7506 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7507 /* Flag a drop! */ 7508 *flags |= CPU_DTRACE_DROP; 7509 continue; 7510 } 7511 7512 /* Store the size in the buffer first. */ 7513 DTRACE_STORE(uintptr_t, tomax, 7514 valoffs, size); 7515 7516 /* 7517 * Offset the buffer address to the start 7518 * of the data. 7519 */ 7520 valoffs += sizeof(uintptr_t); 7521 7522 /* 7523 * Reset to the memory address rather than 7524 * the memref array, then let the BYREF 7525 * code below do the work to store the 7526 * memory data in the buffer. 7527 */ 7528 val = memref[0]; 7529 break; 7530 } 7531 7532 case DTRACEACT_PRINTT: { 7533 /* The DIF returns a 'typeref'. */ 7534 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 7535 char c = '\0' + 1; 7536 size_t s; 7537 7538 /* 7539 * Get the type string length and round it 7540 * up so that the data that follows is 7541 * aligned for easy access. 
7542 */ 7543 size_t typs = strlen((char *) typeref[2]) + 1; 7544 typs = roundup(typs, sizeof(uintptr_t)); 7545 7546 /* 7547 * Get the size from the typeref using the 7548 * number of elements and the type size. 7549 */ 7550 size = typeref[1] * typeref[3]; 7551 7552 /* 7553 * Check if the size exceeds the allocated 7554 * buffer size. 7555 */ 7556 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 7557 /* Flag a drop! */ 7558 *flags |= CPU_DTRACE_DROP; 7559 continue; 7560 } 7561 7562 /* Store the size in the buffer first. */ 7563 DTRACE_STORE(uintptr_t, tomax, 7564 valoffs, size); 7565 valoffs += sizeof(uintptr_t); 7566 7567 /* Store the type size in the buffer. */ 7568 DTRACE_STORE(uintptr_t, tomax, 7569 valoffs, typeref[3]); 7570 valoffs += sizeof(uintptr_t); 7571 7572 val = typeref[2]; 7573 7574 for (s = 0; s < typs; s++) { 7575 if (c != '\0') 7576 c = dtrace_load8(val++); 7577 7578 DTRACE_STORE(uint8_t, tomax, 7579 valoffs++, c); 7580 } 7581 7582 /* 7583 * Reset to the memory address rather than 7584 * the typeref array, then let the BYREF 7585 * code below do the work to store the 7586 * memory data in the buffer. 7587 */ 7588 val = typeref[0]; 7589 break; 7590 } 7591 7592 case DTRACEACT_CHILL: 7593 if (dtrace_priv_kernel_destructive(state)) 7594 dtrace_action_chill(&mstate, val); 7595 continue; 7596 7597 case DTRACEACT_RAISE: 7598 if (dtrace_priv_proc_destructive(state)) 7599 dtrace_action_raise(val); 7600 continue; 7601 7602 case DTRACEACT_COMMIT: 7603 ASSERT(!committed); 7604 7605 /* 7606 * We need to commit our buffer state. 7607 */ 7608 if (ecb->dte_size) 7609 buf->dtb_offset = offs + ecb->dte_size; 7610 buf = &state->dts_buffer[cpuid]; 7611 dtrace_speculation_commit(state, cpuid, val); 7612 committed = 1; 7613 continue; 7614 7615 case DTRACEACT_DISCARD: 7616 dtrace_speculation_discard(state, cpuid, val); 7617 continue; 7618 7619 case DTRACEACT_DIFEXPR: 7620 case DTRACEACT_LIBACT: 7621 case DTRACEACT_PRINTF: 7622 case DTRACEACT_PRINTA: 7623 case DTRACEACT_SYSTEM: 7624 case DTRACEACT_FREOPEN: 7625 case DTRACEACT_TRACEMEM: 7626 break; 7627 7628 case DTRACEACT_TRACEMEM_DYNSIZE: 7629 tracememsize = val; 7630 break; 7631 7632 case DTRACEACT_SYM: 7633 case DTRACEACT_MOD: 7634 if (!dtrace_priv_kernel(state)) 7635 continue; 7636 break; 7637 7638 case DTRACEACT_USYM: 7639 case DTRACEACT_UMOD: 7640 case DTRACEACT_UADDR: { 7641 #ifdef illumos 7642 struct pid *pid = curthread->t_procp->p_pidp; 7643 #endif 7644 7645 if (!dtrace_priv_proc(state)) 7646 continue; 7647 7648 DTRACE_STORE(uint64_t, tomax, 7649 #ifdef illumos 7650 valoffs, (uint64_t)pid->pid_id); 7651 #else 7652 valoffs, (uint64_t) curproc->p_pid); 7653 #endif 7654 DTRACE_STORE(uint64_t, tomax, 7655 valoffs + sizeof (uint64_t), val); 7656 7657 continue; 7658 } 7659 7660 case DTRACEACT_EXIT: { 7661 /* 7662 * For the exit action, we are going to attempt 7663 * to atomically set our activity to be 7664 * draining. If this fails (either because 7665 * another CPU has beaten us to the exit action, 7666 * or because our current activity is something 7667 * other than ACTIVE or WARMUP), we will 7668 * continue. This assures that the exit action 7669 * can be successfully recorded at most once 7670 * when we're in the ACTIVE state. If we're 7671 * encountering the exit() action while in 7672 * COOLDOWN, however, we want to honor the new 7673 * status code. (We know that we're the only 7674 * thread in COOLDOWN, so there is no race.) 
7675 */ 7676 void *activity = &state->dts_activity; 7677 dtrace_activity_t current = state->dts_activity; 7678 7679 if (current == DTRACE_ACTIVITY_COOLDOWN) 7680 break; 7681 7682 if (current != DTRACE_ACTIVITY_WARMUP) 7683 current = DTRACE_ACTIVITY_ACTIVE; 7684 7685 if (dtrace_cas32(activity, current, 7686 DTRACE_ACTIVITY_DRAINING) != current) { 7687 *flags |= CPU_DTRACE_DROP; 7688 continue; 7689 } 7690 7691 break; 7692 } 7693 7694 default: 7695 ASSERT(0); 7696 } 7697 7698 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || 7699 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { 7700 uintptr_t end = valoffs + size; 7701 7702 if (tracememsize != 0 && 7703 valoffs + tracememsize < end) { 7704 end = valoffs + tracememsize; 7705 tracememsize = 0; 7706 } 7707 7708 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && 7709 !dtrace_vcanload((void *)(uintptr_t)val, 7710 &dp->dtdo_rtype, &mstate, vstate)) 7711 continue; 7712 7713 dtrace_store_by_ref(dp, tomax, size, &valoffs, 7714 &val, end, act->dta_intuple, 7715 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? 7716 DIF_TF_BYREF: DIF_TF_BYUREF); 7717 continue; 7718 } 7719 7720 switch (size) { 7721 case 0: 7722 break; 7723 7724 case sizeof (uint8_t): 7725 DTRACE_STORE(uint8_t, tomax, valoffs, val); 7726 break; 7727 case sizeof (uint16_t): 7728 DTRACE_STORE(uint16_t, tomax, valoffs, val); 7729 break; 7730 case sizeof (uint32_t): 7731 DTRACE_STORE(uint32_t, tomax, valoffs, val); 7732 break; 7733 case sizeof (uint64_t): 7734 DTRACE_STORE(uint64_t, tomax, valoffs, val); 7735 break; 7736 default: 7737 /* 7738 * Any other size should have been returned by 7739 * reference, not by value. 7740 */ 7741 ASSERT(0); 7742 break; 7743 } 7744 } 7745 7746 if (*flags & CPU_DTRACE_DROP) 7747 continue; 7748 7749 if (*flags & CPU_DTRACE_FAULT) { 7750 int ndx; 7751 dtrace_action_t *err; 7752 7753 buf->dtb_errors++; 7754 7755 if (probe->dtpr_id == dtrace_probeid_error) { 7756 /* 7757 * There's nothing we can do -- we had an 7758 * error on the error probe. We bump an 7759 * error counter to at least indicate that 7760 * this condition happened. 7761 */ 7762 dtrace_error(&state->dts_dblerrors); 7763 continue; 7764 } 7765 7766 if (vtime) { 7767 /* 7768 * Before recursing on dtrace_probe(), we 7769 * need to explicitly clear out our start 7770 * time to prevent it from being accumulated 7771 * into t_dtrace_vtime. 7772 */ 7773 curthread->t_dtrace_start = 0; 7774 } 7775 7776 /* 7777 * Iterate over the actions to figure out which action 7778 * we were processing when we experienced the error. 7779 * Note that act points _past_ the faulting action; if 7780 * act is ecb->dte_action, the fault was in the 7781 * predicate, if it's ecb->dte_action->dta_next it's 7782 * in action #1, and so on. 7783 */ 7784 for (err = ecb->dte_action, ndx = 0; 7785 err != act; err = err->dta_next, ndx++) 7786 continue; 7787 7788 dtrace_probe_error(state, ecb->dte_epid, ndx, 7789 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 7790 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 7791 cpu_core[cpuid].cpuc_dtrace_illval); 7792 7793 continue; 7794 } 7795 7796 if (!committed) 7797 buf->dtb_offset = offs + ecb->dte_size; 7798 } 7799 7800 if (vtime) 7801 curthread->t_dtrace_start = dtrace_gethrtime(); 7802 7803 dtrace_interrupt_enable(cookie); 7804 } 7805 7806 /* 7807 * DTrace Probe Hashing Functions 7808 * 7809 * The functions in this section (and indeed, the functions in remaining 7810 * sections) are not _called_ from probe context. (Any exceptions to this are 7811 * marked with a "Note:".) 
Rather, they are called from elsewhere in the 7812 * DTrace framework to look-up probes in, add probes to and remove probes from 7813 * the DTrace probe hashes. (Each probe is hashed by each element of the 7814 * probe tuple -- allowing for fast lookups, regardless of what was 7815 * specified.) 7816 */ 7817 static uint_t 7818 dtrace_hash_str(const char *p) 7819 { 7820 unsigned int g; 7821 uint_t hval = 0; 7822 7823 while (*p) { 7824 hval = (hval << 4) + *p++; 7825 if ((g = (hval & 0xf0000000)) != 0) 7826 hval ^= g >> 24; 7827 hval &= ~g; 7828 } 7829 return (hval); 7830 } 7831 7832 static dtrace_hash_t * 7833 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 7834 { 7835 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 7836 7837 hash->dth_stroffs = stroffs; 7838 hash->dth_nextoffs = nextoffs; 7839 hash->dth_prevoffs = prevoffs; 7840 7841 hash->dth_size = 1; 7842 hash->dth_mask = hash->dth_size - 1; 7843 7844 hash->dth_tab = kmem_zalloc(hash->dth_size * 7845 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 7846 7847 return (hash); 7848 } 7849 7850 static void 7851 dtrace_hash_destroy(dtrace_hash_t *hash) 7852 { 7853 #ifdef DEBUG 7854 int i; 7855 7856 for (i = 0; i < hash->dth_size; i++) 7857 ASSERT(hash->dth_tab[i] == NULL); 7858 #endif 7859 7860 kmem_free(hash->dth_tab, 7861 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 7862 kmem_free(hash, sizeof (dtrace_hash_t)); 7863 } 7864 7865 static void 7866 dtrace_hash_resize(dtrace_hash_t *hash) 7867 { 7868 int size = hash->dth_size, i, ndx; 7869 int new_size = hash->dth_size << 1; 7870 int new_mask = new_size - 1; 7871 dtrace_hashbucket_t **new_tab, *bucket, *next; 7872 7873 ASSERT((new_size & new_mask) == 0); 7874 7875 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 7876 7877 for (i = 0; i < size; i++) { 7878 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 7879 dtrace_probe_t *probe = bucket->dthb_chain; 7880 7881 ASSERT(probe != NULL); 7882 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 7883 7884 next = bucket->dthb_next; 7885 bucket->dthb_next = new_tab[ndx]; 7886 new_tab[ndx] = bucket; 7887 } 7888 } 7889 7890 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 7891 hash->dth_tab = new_tab; 7892 hash->dth_size = new_size; 7893 hash->dth_mask = new_mask; 7894 } 7895 7896 static void 7897 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 7898 { 7899 int hashval = DTRACE_HASHSTR(hash, new); 7900 int ndx = hashval & hash->dth_mask; 7901 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7902 dtrace_probe_t **nextp, **prevp; 7903 7904 for (; bucket != NULL; bucket = bucket->dthb_next) { 7905 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 7906 goto add; 7907 } 7908 7909 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 7910 dtrace_hash_resize(hash); 7911 dtrace_hash_add(hash, new); 7912 return; 7913 } 7914 7915 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 7916 bucket->dthb_next = hash->dth_tab[ndx]; 7917 hash->dth_tab[ndx] = bucket; 7918 hash->dth_nbuckets++; 7919 7920 add: 7921 nextp = DTRACE_HASHNEXT(hash, new); 7922 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 7923 *nextp = bucket->dthb_chain; 7924 7925 if (bucket->dthb_chain != NULL) { 7926 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 7927 ASSERT(*prevp == NULL); 7928 *prevp = new; 7929 } 7930 7931 bucket->dthb_chain = new; 7932 bucket->dthb_len++; 7933 } 7934 7935 static dtrace_probe_t * 7936 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 7937 
{ 7938 int hashval = DTRACE_HASHSTR(hash, template); 7939 int ndx = hashval & hash->dth_mask; 7940 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7941 7942 for (; bucket != NULL; bucket = bucket->dthb_next) { 7943 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7944 return (bucket->dthb_chain); 7945 } 7946 7947 return (NULL); 7948 } 7949 7950 static int 7951 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 7952 { 7953 int hashval = DTRACE_HASHSTR(hash, template); 7954 int ndx = hashval & hash->dth_mask; 7955 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7956 7957 for (; bucket != NULL; bucket = bucket->dthb_next) { 7958 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 7959 return (bucket->dthb_len); 7960 } 7961 7962 return (0); 7963 } 7964 7965 static void 7966 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 7967 { 7968 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 7969 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 7970 7971 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 7972 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 7973 7974 /* 7975 * Find the bucket that we're removing this probe from. 7976 */ 7977 for (; bucket != NULL; bucket = bucket->dthb_next) { 7978 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 7979 break; 7980 } 7981 7982 ASSERT(bucket != NULL); 7983 7984 if (*prevp == NULL) { 7985 if (*nextp == NULL) { 7986 /* 7987 * The removed probe was the only probe on this 7988 * bucket; we need to remove the bucket. 7989 */ 7990 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 7991 7992 ASSERT(bucket->dthb_chain == probe); 7993 ASSERT(b != NULL); 7994 7995 if (b == bucket) { 7996 hash->dth_tab[ndx] = bucket->dthb_next; 7997 } else { 7998 while (b->dthb_next != bucket) 7999 b = b->dthb_next; 8000 b->dthb_next = bucket->dthb_next; 8001 } 8002 8003 ASSERT(hash->dth_nbuckets > 0); 8004 hash->dth_nbuckets--; 8005 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 8006 return; 8007 } 8008 8009 bucket->dthb_chain = *nextp; 8010 } else { 8011 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 8012 } 8013 8014 if (*nextp != NULL) 8015 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 8016 } 8017 8018 /* 8019 * DTrace Utility Functions 8020 * 8021 * These are random utility functions that are _not_ called from probe context. 8022 */ 8023 static int 8024 dtrace_badattr(const dtrace_attribute_t *a) 8025 { 8026 return (a->dtat_name > DTRACE_STABILITY_MAX || 8027 a->dtat_data > DTRACE_STABILITY_MAX || 8028 a->dtat_class > DTRACE_CLASS_MAX); 8029 } 8030 8031 /* 8032 * Return a duplicate copy of a string. If the specified string is NULL, 8033 * this function returns a zero-length string. 8034 */ 8035 static char * 8036 dtrace_strdup(const char *str) 8037 { 8038 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 8039 8040 if (str != NULL) 8041 (void) strcpy(new, str); 8042 8043 return (new); 8044 } 8045 8046 #define DTRACE_ISALPHA(c) \ 8047 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 8048 8049 static int 8050 dtrace_badname(const char *s) 8051 { 8052 char c; 8053 8054 if (s == NULL || (c = *s++) == '\0') 8055 return (0); 8056 8057 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 8058 return (1); 8059 8060 while ((c = *s++) != '\0') { 8061 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 8062 c != '-' && c != '_' && c != '.' 
&& c != '`') 8063 return (1); 8064 } 8065 8066 return (0); 8067 } 8068 8069 static void 8070 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 8071 { 8072 uint32_t priv; 8073 8074 #ifdef illumos 8075 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 8076 /* 8077 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 8078 */ 8079 priv = DTRACE_PRIV_ALL; 8080 } else { 8081 *uidp = crgetuid(cr); 8082 *zoneidp = crgetzoneid(cr); 8083 8084 priv = 0; 8085 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 8086 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 8087 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 8088 priv |= DTRACE_PRIV_USER; 8089 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 8090 priv |= DTRACE_PRIV_PROC; 8091 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 8092 priv |= DTRACE_PRIV_OWNER; 8093 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 8094 priv |= DTRACE_PRIV_ZONEOWNER; 8095 } 8096 #else 8097 priv = DTRACE_PRIV_ALL; 8098 #endif 8099 8100 *privp = priv; 8101 } 8102 8103 #ifdef DTRACE_ERRDEBUG 8104 static void 8105 dtrace_errdebug(const char *str) 8106 { 8107 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 8108 int occupied = 0; 8109 8110 mutex_enter(&dtrace_errlock); 8111 dtrace_errlast = str; 8112 dtrace_errthread = curthread; 8113 8114 while (occupied++ < DTRACE_ERRHASHSZ) { 8115 if (dtrace_errhash[hval].dter_msg == str) { 8116 dtrace_errhash[hval].dter_count++; 8117 goto out; 8118 } 8119 8120 if (dtrace_errhash[hval].dter_msg != NULL) { 8121 hval = (hval + 1) % DTRACE_ERRHASHSZ; 8122 continue; 8123 } 8124 8125 dtrace_errhash[hval].dter_msg = str; 8126 dtrace_errhash[hval].dter_count = 1; 8127 goto out; 8128 } 8129 8130 panic("dtrace: undersized error hash"); 8131 out: 8132 mutex_exit(&dtrace_errlock); 8133 } 8134 #endif 8135 8136 /* 8137 * DTrace Matching Functions 8138 * 8139 * These functions are used to match groups of probes, given some elements of 8140 * a probe tuple, or some globbed expressions for elements of a probe tuple. 8141 */ 8142 static int 8143 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 8144 zoneid_t zoneid) 8145 { 8146 if (priv != DTRACE_PRIV_ALL) { 8147 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 8148 uint32_t match = priv & ppriv; 8149 8150 /* 8151 * No PRIV_DTRACE_* privileges... 8152 */ 8153 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 8154 DTRACE_PRIV_KERNEL)) == 0) 8155 return (0); 8156 8157 /* 8158 * No matching bits, but there were bits to match... 8159 */ 8160 if (match == 0 && ppriv != 0) 8161 return (0); 8162 8163 /* 8164 * Need to have permissions to the process, but don't... 8165 */ 8166 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 8167 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 8168 return (0); 8169 } 8170 8171 /* 8172 * Need to be in the same zone unless we possess the 8173 * privilege to examine all zones. 8174 */ 8175 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 8176 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 8177 return (0); 8178 } 8179 } 8180 8181 return (1); 8182 } 8183 8184 /* 8185 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 8186 * consists of input pattern strings and an ops-vector to evaluate them. 8187 * This function returns >0 for match, 0 for no match, and <0 for error. 
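 *
 * (Editor's illustration: for the description "syscall::read*:entry",
 * dtpk_prov "syscall" and dtpk_name "entry" are compared with
 * dtrace_match_string(), the empty dtpk_mod with dtrace_match_nul(),
 * and dtpk_func "read*" with dtrace_match_glob(), per the selection
 * rules described at the end of this section.)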
8188 */ 8189 static int 8190 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 8191 uint32_t priv, uid_t uid, zoneid_t zoneid) 8192 { 8193 dtrace_provider_t *pvp = prp->dtpr_provider; 8194 int rv; 8195 8196 if (pvp->dtpv_defunct) 8197 return (0); 8198 8199 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 8200 return (rv); 8201 8202 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 8203 return (rv); 8204 8205 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 8206 return (rv); 8207 8208 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 8209 return (rv); 8210 8211 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 8212 return (0); 8213 8214 return (rv); 8215 } 8216 8217 /* 8218 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 8219 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 8220 * libc's version, the kernel version only applies to 8-bit ASCII strings. 8221 * In addition, all of the recursion cases except for '*' matching have been 8222 * unwound. For '*', we still implement recursive evaluation, but a depth 8223 * counter is maintained and matching is aborted if we recurse too deep. 8224 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 8225 */ 8226 static int 8227 dtrace_match_glob(const char *s, const char *p, int depth) 8228 { 8229 const char *olds; 8230 char s1, c; 8231 int gs; 8232 8233 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 8234 return (-1); 8235 8236 if (s == NULL) 8237 s = ""; /* treat NULL as empty string */ 8238 8239 top: 8240 olds = s; 8241 s1 = *s++; 8242 8243 if (p == NULL) 8244 return (0); 8245 8246 if ((c = *p++) == '\0') 8247 return (s1 == '\0'); 8248 8249 switch (c) { 8250 case '[': { 8251 int ok = 0, notflag = 0; 8252 char lc = '\0'; 8253 8254 if (s1 == '\0') 8255 return (0); 8256 8257 if (*p == '!') { 8258 notflag = 1; 8259 p++; 8260 } 8261 8262 if ((c = *p++) == '\0') 8263 return (0); 8264 8265 do { 8266 if (c == '-' && lc != '\0' && *p != ']') { 8267 if ((c = *p++) == '\0') 8268 return (0); 8269 if (c == '\\' && (c = *p++) == '\0') 8270 return (0); 8271 8272 if (notflag) { 8273 if (s1 < lc || s1 > c) 8274 ok++; 8275 else 8276 return (0); 8277 } else if (lc <= s1 && s1 <= c) 8278 ok++; 8279 8280 } else if (c == '\\' && (c = *p++) == '\0') 8281 return (0); 8282 8283 lc = c; /* save left-hand 'c' for next iteration */ 8284 8285 if (notflag) { 8286 if (s1 != c) 8287 ok++; 8288 else 8289 return (0); 8290 } else if (s1 == c) 8291 ok++; 8292 8293 if ((c = *p++) == '\0') 8294 return (0); 8295 8296 } while (c != ']'); 8297 8298 if (ok) 8299 goto top; 8300 8301 return (0); 8302 } 8303 8304 case '\\': 8305 if ((c = *p++) == '\0') 8306 return (0); 8307 /*FALLTHRU*/ 8308 8309 default: 8310 if (c != s1) 8311 return (0); 8312 /*FALLTHRU*/ 8313 8314 case '?': 8315 if (s1 != '\0') 8316 goto top; 8317 return (0); 8318 8319 case '*': 8320 while (*p == '*') 8321 p++; /* consecutive *'s are identical to a single one */ 8322 8323 if (*p == '\0') 8324 return (1); 8325 8326 for (s = olds; *s != '\0'; s++) { 8327 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 8328 return (gs); 8329 } 8330 8331 return (0); 8332 } 8333 } 8334 8335 /*ARGSUSED*/ 8336 static int 8337 dtrace_match_string(const char *s, const char *p, int depth) 8338 { 8339 return (s != NULL && strcmp(s, p) == 0); 8340 } 8341 8342 /*ARGSUSED*/ 8343 static int 8344 dtrace_match_nul(const char *s, const char *p, int depth) 8345 { 8346 return (1); /* 
always match the empty pattern */ 8347 } 8348 8349 /*ARGSUSED*/ 8350 static int 8351 dtrace_match_nonzero(const char *s, const char *p, int depth) 8352 { 8353 return (s != NULL && s[0] != '\0'); 8354 } 8355 8356 static int 8357 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 8358 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 8359 { 8360 dtrace_probe_t template, *probe; 8361 dtrace_hash_t *hash = NULL; 8362 int len, best = INT_MAX, nmatched = 0; 8363 dtrace_id_t i; 8364 8365 ASSERT(MUTEX_HELD(&dtrace_lock)); 8366 8367 /* 8368 * If the probe ID is specified in the key, just lookup by ID and 8369 * invoke the match callback once if a matching probe is found. 8370 */ 8371 if (pkp->dtpk_id != DTRACE_IDNONE) { 8372 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 8373 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 8374 (void) (*matched)(probe, arg); 8375 nmatched++; 8376 } 8377 return (nmatched); 8378 } 8379 8380 template.dtpr_mod = (char *)pkp->dtpk_mod; 8381 template.dtpr_func = (char *)pkp->dtpk_func; 8382 template.dtpr_name = (char *)pkp->dtpk_name; 8383 8384 /* 8385 * We want to find the most distinct of the module name, function 8386 * name, and name. So for each one that is not a glob pattern or 8387 * empty string, we perform a lookup in the corresponding hash and 8388 * use the hash table with the fewest collisions to do our search. 8389 */ 8390 if (pkp->dtpk_mmatch == &dtrace_match_string && 8391 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 8392 best = len; 8393 hash = dtrace_bymod; 8394 } 8395 8396 if (pkp->dtpk_fmatch == &dtrace_match_string && 8397 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 8398 best = len; 8399 hash = dtrace_byfunc; 8400 } 8401 8402 if (pkp->dtpk_nmatch == &dtrace_match_string && 8403 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 8404 best = len; 8405 hash = dtrace_byname; 8406 } 8407 8408 /* 8409 * If we did not select a hash table, iterate over every probe and 8410 * invoke our callback for each one that matches our input probe key. 8411 */ 8412 if (hash == NULL) { 8413 for (i = 0; i < dtrace_nprobes; i++) { 8414 if ((probe = dtrace_probes[i]) == NULL || 8415 dtrace_match_probe(probe, pkp, priv, uid, 8416 zoneid) <= 0) 8417 continue; 8418 8419 nmatched++; 8420 8421 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8422 break; 8423 } 8424 8425 return (nmatched); 8426 } 8427 8428 /* 8429 * If we selected a hash table, iterate over each probe of the same key 8430 * name and invoke the callback for every probe that matches the other 8431 * attributes of our input probe key. 8432 */ 8433 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 8434 probe = *(DTRACE_HASHNEXT(hash, probe))) { 8435 8436 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 8437 continue; 8438 8439 nmatched++; 8440 8441 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 8442 break; 8443 } 8444 8445 return (nmatched); 8446 } 8447 8448 /* 8449 * Return the function pointer dtrace_probecmp() should use to compare the 8450 * specified pattern with a string. For NULL or empty patterns, we select 8451 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 8452 * For non-empty non-glob strings, we use dtrace_match_string(). 
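 *
 * For example, applying the selection rules below:
 *
 *	dtrace_probekey_func(NULL)	== &dtrace_match_nul
 *	dtrace_probekey_func("")	== &dtrace_match_nul
 *	dtrace_probekey_func("read*")	== &dtrace_match_glob
 *	dtrace_probekey_func("entry")	== &dtrace_match_string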
8453 */ 8454 static dtrace_probekey_f * 8455 dtrace_probekey_func(const char *p) 8456 { 8457 char c; 8458 8459 if (p == NULL || *p == '\0') 8460 return (&dtrace_match_nul); 8461 8462 while ((c = *p++) != '\0') { 8463 if (c == '[' || c == '?' || c == '*' || c == '\\') 8464 return (&dtrace_match_glob); 8465 } 8466 8467 return (&dtrace_match_string); 8468 } 8469 8470 /* 8471 * Build a probe comparison key for use with dtrace_match_probe() from the 8472 * given probe description. By convention, a null key only matches anchored 8473 * probes: if each field is the empty string, reset dtpk_fmatch to 8474 * dtrace_match_nonzero(). 8475 */ 8476 static void 8477 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 8478 { 8479 pkp->dtpk_prov = pdp->dtpd_provider; 8480 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 8481 8482 pkp->dtpk_mod = pdp->dtpd_mod; 8483 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 8484 8485 pkp->dtpk_func = pdp->dtpd_func; 8486 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 8487 8488 pkp->dtpk_name = pdp->dtpd_name; 8489 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 8490 8491 pkp->dtpk_id = pdp->dtpd_id; 8492 8493 if (pkp->dtpk_id == DTRACE_IDNONE && 8494 pkp->dtpk_pmatch == &dtrace_match_nul && 8495 pkp->dtpk_mmatch == &dtrace_match_nul && 8496 pkp->dtpk_fmatch == &dtrace_match_nul && 8497 pkp->dtpk_nmatch == &dtrace_match_nul) 8498 pkp->dtpk_fmatch = &dtrace_match_nonzero; 8499 } 8500 8501 /* 8502 * DTrace Provider-to-Framework API Functions 8503 * 8504 * These functions implement much of the Provider-to-Framework API, as 8505 * described in <sys/dtrace.h>. The parts of the API not in this section are 8506 * the functions in the API for probe management (found below), and 8507 * dtrace_probe() itself (found above). 8508 */ 8509 8510 /* 8511 * Register the calling provider with the DTrace framework. This should 8512 * generally be called by DTrace providers in their attach(9E) entry point. 8513 */ 8514 int 8515 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 8516 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 8517 { 8518 dtrace_provider_t *provider; 8519 8520 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 8521 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8522 "arguments", name ? 
name : "<NULL>"); 8523 return (EINVAL); 8524 } 8525 8526 if (name[0] == '\0' || dtrace_badname(name)) { 8527 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8528 "provider name", name); 8529 return (EINVAL); 8530 } 8531 8532 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 8533 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 8534 pops->dtps_destroy == NULL || 8535 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 8536 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8537 "provider ops", name); 8538 return (EINVAL); 8539 } 8540 8541 if (dtrace_badattr(&pap->dtpa_provider) || 8542 dtrace_badattr(&pap->dtpa_mod) || 8543 dtrace_badattr(&pap->dtpa_func) || 8544 dtrace_badattr(&pap->dtpa_name) || 8545 dtrace_badattr(&pap->dtpa_args)) { 8546 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8547 "provider attributes", name); 8548 return (EINVAL); 8549 } 8550 8551 if (priv & ~DTRACE_PRIV_ALL) { 8552 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 8553 "privilege attributes", name); 8554 return (EINVAL); 8555 } 8556 8557 if ((priv & DTRACE_PRIV_KERNEL) && 8558 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 8559 pops->dtps_usermode == NULL) { 8560 cmn_err(CE_WARN, "failed to register provider '%s': need " 8561 "dtps_usermode() op for given privilege attributes", name); 8562 return (EINVAL); 8563 } 8564 8565 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 8566 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8567 (void) strcpy(provider->dtpv_name, name); 8568 8569 provider->dtpv_attr = *pap; 8570 provider->dtpv_priv.dtpp_flags = priv; 8571 if (cr != NULL) { 8572 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 8573 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 8574 } 8575 provider->dtpv_pops = *pops; 8576 8577 if (pops->dtps_provide == NULL) { 8578 ASSERT(pops->dtps_provide_module != NULL); 8579 provider->dtpv_pops.dtps_provide = 8580 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 8581 } 8582 8583 if (pops->dtps_provide_module == NULL) { 8584 ASSERT(pops->dtps_provide != NULL); 8585 provider->dtpv_pops.dtps_provide_module = 8586 (void (*)(void *, modctl_t *))dtrace_nullop; 8587 } 8588 8589 if (pops->dtps_suspend == NULL) { 8590 ASSERT(pops->dtps_resume == NULL); 8591 provider->dtpv_pops.dtps_suspend = 8592 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8593 provider->dtpv_pops.dtps_resume = 8594 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 8595 } 8596 8597 provider->dtpv_arg = arg; 8598 *idp = (dtrace_provider_id_t)provider; 8599 8600 if (pops == &dtrace_provider_ops) { 8601 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8602 ASSERT(MUTEX_HELD(&dtrace_lock)); 8603 ASSERT(dtrace_anon.dta_enabling == NULL); 8604 8605 /* 8606 * We make sure that the DTrace provider is at the head of 8607 * the provider chain. 8608 */ 8609 provider->dtpv_next = dtrace_provider; 8610 dtrace_provider = provider; 8611 return (0); 8612 } 8613 8614 mutex_enter(&dtrace_provider_lock); 8615 mutex_enter(&dtrace_lock); 8616 8617 /* 8618 * If there is at least one provider registered, we'll add this 8619 * provider after the first provider. 
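 * (The DTrace provider itself is always first: the special case above
 * placed it at the head of the chain and returned early, so inserting
 * after the head preserves that invariant.)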
8620 */ 8621 if (dtrace_provider != NULL) { 8622 provider->dtpv_next = dtrace_provider->dtpv_next; 8623 dtrace_provider->dtpv_next = provider; 8624 } else { 8625 dtrace_provider = provider; 8626 } 8627 8628 if (dtrace_retained != NULL) { 8629 dtrace_enabling_provide(provider); 8630 8631 /* 8632 * Now we need to call dtrace_enabling_matchall() -- which 8633 * will acquire cpu_lock and dtrace_lock. We therefore need 8634 * to drop all of our locks before calling into it... 8635 */ 8636 mutex_exit(&dtrace_lock); 8637 mutex_exit(&dtrace_provider_lock); 8638 dtrace_enabling_matchall(); 8639 8640 return (0); 8641 } 8642 8643 mutex_exit(&dtrace_lock); 8644 mutex_exit(&dtrace_provider_lock); 8645 8646 return (0); 8647 } 8648 8649 /* 8650 * Unregister the specified provider from the DTrace framework. This should 8651 * generally be called by DTrace providers in their detach(9E) entry point. 8652 */ 8653 int 8654 dtrace_unregister(dtrace_provider_id_t id) 8655 { 8656 dtrace_provider_t *old = (dtrace_provider_t *)id; 8657 dtrace_provider_t *prev = NULL; 8658 int i, self = 0, noreap = 0; 8659 dtrace_probe_t *probe, *first = NULL; 8660 8661 if (old->dtpv_pops.dtps_enable == 8662 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 8663 /* 8664 * If DTrace itself is the provider, we're called with locks 8665 * already held. 8666 */ 8667 ASSERT(old == dtrace_provider); 8668 #ifdef illumos 8669 ASSERT(dtrace_devi != NULL); 8670 #endif 8671 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8672 ASSERT(MUTEX_HELD(&dtrace_lock)); 8673 self = 1; 8674 8675 if (dtrace_provider->dtpv_next != NULL) { 8676 /* 8677 * There's another provider here; return failure. 8678 */ 8679 return (EBUSY); 8680 } 8681 } else { 8682 mutex_enter(&dtrace_provider_lock); 8683 #ifdef illumos 8684 mutex_enter(&mod_lock); 8685 #endif 8686 mutex_enter(&dtrace_lock); 8687 } 8688 8689 /* 8690 * If anyone has /dev/dtrace open, or if there are anonymous enabled 8691 * probes, we refuse to let providers slither away, unless this 8692 * provider has already been explicitly invalidated. 8693 */ 8694 if (!old->dtpv_defunct && 8695 (dtrace_opens || (dtrace_anon.dta_state != NULL && 8696 dtrace_anon.dta_state->dts_necbs > 0))) { 8697 if (!self) { 8698 mutex_exit(&dtrace_lock); 8699 #ifdef illumos 8700 mutex_exit(&mod_lock); 8701 #endif 8702 mutex_exit(&dtrace_provider_lock); 8703 } 8704 return (EBUSY); 8705 } 8706 8707 /* 8708 * Attempt to destroy the probes associated with this provider. 8709 */ 8710 for (i = 0; i < dtrace_nprobes; i++) { 8711 if ((probe = dtrace_probes[i]) == NULL) 8712 continue; 8713 8714 if (probe->dtpr_provider != old) 8715 continue; 8716 8717 if (probe->dtpr_ecb == NULL) 8718 continue; 8719 8720 /* 8721 * If we are trying to unregister a defunct provider, and the 8722 * provider was made defunct within the interval dictated by 8723 * dtrace_unregister_defunct_reap, we'll (asynchronously) 8724 * attempt to reap our enablings. To denote that the provider 8725 * should reattempt to unregister itself at some point in the 8726 * future, we will return a differentiable error code (EAGAIN 8727 * instead of EBUSY) in this case. 
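 *
 * A provider's detach(9E) routine might therefore distinguish the two
 * codes -- a hedged sketch with a hypothetical provider, not code from
 * this file:
 *
 *	error = dtrace_unregister(my_provider_id);
 *	if (error == EAGAIN)
 *		return (EBUSY);	   [reap dispatched; detach may be retried]
 *	if (error != 0)
 *		return (error);	   [enablings still active]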
8728 */ 8729 if (dtrace_gethrtime() - old->dtpv_defunct > 8730 dtrace_unregister_defunct_reap) 8731 noreap = 1; 8732 8733 if (!self) { 8734 mutex_exit(&dtrace_lock); 8735 #ifdef illumos 8736 mutex_exit(&mod_lock); 8737 #endif 8738 mutex_exit(&dtrace_provider_lock); 8739 } 8740 8741 if (noreap) 8742 return (EBUSY); 8743 8744 (void) taskq_dispatch(dtrace_taskq, 8745 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 8746 8747 return (EAGAIN); 8748 } 8749 8750 /* 8751 * All of the probes for this provider are disabled; we can safely 8752 * remove all of them from their hash chains and from the probe array. 8753 */ 8754 for (i = 0; i < dtrace_nprobes; i++) { 8755 if ((probe = dtrace_probes[i]) == NULL) 8756 continue; 8757 8758 if (probe->dtpr_provider != old) 8759 continue; 8760 8761 dtrace_probes[i] = NULL; 8762 8763 dtrace_hash_remove(dtrace_bymod, probe); 8764 dtrace_hash_remove(dtrace_byfunc, probe); 8765 dtrace_hash_remove(dtrace_byname, probe); 8766 8767 if (first == NULL) { 8768 first = probe; 8769 probe->dtpr_nextmod = NULL; 8770 } else { 8771 probe->dtpr_nextmod = first; 8772 first = probe; 8773 } 8774 } 8775 8776 /* 8777 * The provider's probes have been removed from the hash chains and 8778 * from the probe array. Now issue a dtrace_sync() to be sure that 8779 * everyone has cleared out from any probe array processing. 8780 */ 8781 dtrace_sync(); 8782 8783 for (probe = first; probe != NULL; probe = first) { 8784 first = probe->dtpr_nextmod; 8785 8786 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 8787 probe->dtpr_arg); 8788 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8789 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8790 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8791 #ifdef illumos 8792 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 8793 #else 8794 free_unr(dtrace_arena, probe->dtpr_id); 8795 #endif 8796 kmem_free(probe, sizeof (dtrace_probe_t)); 8797 } 8798 8799 if ((prev = dtrace_provider) == old) { 8800 #ifdef illumos 8801 ASSERT(self || dtrace_devi == NULL); 8802 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 8803 #endif 8804 dtrace_provider = old->dtpv_next; 8805 } else { 8806 while (prev != NULL && prev->dtpv_next != old) 8807 prev = prev->dtpv_next; 8808 8809 if (prev == NULL) { 8810 panic("attempt to unregister non-existent " 8811 "dtrace provider %p\n", (void *)id); 8812 } 8813 8814 prev->dtpv_next = old->dtpv_next; 8815 } 8816 8817 if (!self) { 8818 mutex_exit(&dtrace_lock); 8819 #ifdef illumos 8820 mutex_exit(&mod_lock); 8821 #endif 8822 mutex_exit(&dtrace_provider_lock); 8823 } 8824 8825 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 8826 kmem_free(old, sizeof (dtrace_provider_t)); 8827 8828 return (0); 8829 } 8830 8831 /* 8832 * Invalidate the specified provider. All subsequent probe lookups for the 8833 * specified provider will fail, but its probes will not be removed. 8834 */ 8835 void 8836 dtrace_invalidate(dtrace_provider_id_t id) 8837 { 8838 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 8839 8840 ASSERT(pvp->dtpv_pops.dtps_enable != 8841 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8842 8843 mutex_enter(&dtrace_provider_lock); 8844 mutex_enter(&dtrace_lock); 8845 8846 pvp->dtpv_defunct = dtrace_gethrtime(); 8847 8848 mutex_exit(&dtrace_lock); 8849 mutex_exit(&dtrace_provider_lock); 8850 } 8851 8852 /* 8853 * Indicate whether or not DTrace has attached. 
8854 */ 8855 int 8856 dtrace_attached(void) 8857 { 8858 /* 8859 * dtrace_provider will be non-NULL iff the DTrace driver has 8860 * attached. (It's non-NULL because DTrace is always itself a 8861 * provider.) 8862 */ 8863 return (dtrace_provider != NULL); 8864 } 8865 8866 /* 8867 * Remove all the unenabled probes for the given provider. This function is 8868 * not unlike dtrace_unregister(), except that it doesn't remove the provider 8869 * -- just as many of its associated probes as it can. 8870 */ 8871 int 8872 dtrace_condense(dtrace_provider_id_t id) 8873 { 8874 dtrace_provider_t *prov = (dtrace_provider_t *)id; 8875 int i; 8876 dtrace_probe_t *probe; 8877 8878 /* 8879 * Make sure this isn't the dtrace provider itself. 8880 */ 8881 ASSERT(prov->dtpv_pops.dtps_enable != 8882 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 8883 8884 mutex_enter(&dtrace_provider_lock); 8885 mutex_enter(&dtrace_lock); 8886 8887 /* 8888 * Attempt to destroy the probes associated with this provider. 8889 */ 8890 for (i = 0; i < dtrace_nprobes; i++) { 8891 if ((probe = dtrace_probes[i]) == NULL) 8892 continue; 8893 8894 if (probe->dtpr_provider != prov) 8895 continue; 8896 8897 if (probe->dtpr_ecb != NULL) 8898 continue; 8899 8900 dtrace_probes[i] = NULL; 8901 8902 dtrace_hash_remove(dtrace_bymod, probe); 8903 dtrace_hash_remove(dtrace_byfunc, probe); 8904 dtrace_hash_remove(dtrace_byname, probe); 8905 8906 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 8907 probe->dtpr_arg); 8908 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 8909 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 8910 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 8911 kmem_free(probe, sizeof (dtrace_probe_t)); 8912 #ifdef illumos 8913 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 8914 #else 8915 free_unr(dtrace_arena, i + 1); 8916 #endif 8917 } 8918 8919 mutex_exit(&dtrace_lock); 8920 mutex_exit(&dtrace_provider_lock); 8921 8922 return (0); 8923 } 8924 8925 /* 8926 * DTrace Probe Management Functions 8927 * 8928 * The functions in this section perform the DTrace probe management, 8929 * including functions to create probes, look-up probes, and call into the 8930 * providers to request that probes be provided. Some of these functions are 8931 * in the Provider-to-Framework API; these functions can be identified by the 8932 * fact that they are not declared "static". 8933 */ 8934 8935 /* 8936 * Create a probe with the specified module name, function name, and name. 
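 *
 * A provider typically calls this from its dtps_provide() entry point;
 * a hedged sketch with hypothetical names:
 *
 *	(void) dtrace_probe_create(my_provider_id, "mymod", "myfunc",
 *	    "entry", 0, my_probe_state);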
8937 */ 8938 dtrace_id_t 8939 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 8940 const char *func, const char *name, int aframes, void *arg) 8941 { 8942 dtrace_probe_t *probe, **probes; 8943 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 8944 dtrace_id_t id; 8945 8946 if (provider == dtrace_provider) { 8947 ASSERT(MUTEX_HELD(&dtrace_lock)); 8948 } else { 8949 mutex_enter(&dtrace_lock); 8950 } 8951 8952 #ifdef illumos 8953 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 8954 VM_BESTFIT | VM_SLEEP); 8955 #else 8956 id = alloc_unr(dtrace_arena); 8957 #endif 8958 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 8959 8960 probe->dtpr_id = id; 8961 probe->dtpr_gen = dtrace_probegen++; 8962 probe->dtpr_mod = dtrace_strdup(mod); 8963 probe->dtpr_func = dtrace_strdup(func); 8964 probe->dtpr_name = dtrace_strdup(name); 8965 probe->dtpr_arg = arg; 8966 probe->dtpr_aframes = aframes; 8967 probe->dtpr_provider = provider; 8968 8969 dtrace_hash_add(dtrace_bymod, probe); 8970 dtrace_hash_add(dtrace_byfunc, probe); 8971 dtrace_hash_add(dtrace_byname, probe); 8972 8973 if (id - 1 >= dtrace_nprobes) { 8974 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 8975 size_t nsize = osize << 1; 8976 8977 if (nsize == 0) { 8978 ASSERT(osize == 0); 8979 ASSERT(dtrace_probes == NULL); 8980 nsize = sizeof (dtrace_probe_t *); 8981 } 8982 8983 probes = kmem_zalloc(nsize, KM_SLEEP); 8984 8985 if (dtrace_probes == NULL) { 8986 ASSERT(osize == 0); 8987 dtrace_probes = probes; 8988 dtrace_nprobes = 1; 8989 } else { 8990 dtrace_probe_t **oprobes = dtrace_probes; 8991 8992 bcopy(oprobes, probes, osize); 8993 dtrace_membar_producer(); 8994 dtrace_probes = probes; 8995 8996 dtrace_sync(); 8997 8998 /* 8999 * All CPUs are now seeing the new probes array; we can 9000 * safely free the old array. 9001 */ 9002 kmem_free(oprobes, osize); 9003 dtrace_nprobes <<= 1; 9004 } 9005 9006 ASSERT(id - 1 < dtrace_nprobes); 9007 } 9008 9009 ASSERT(dtrace_probes[id - 1] == NULL); 9010 dtrace_probes[id - 1] = probe; 9011 9012 if (provider != dtrace_provider) 9013 mutex_exit(&dtrace_lock); 9014 9015 return (id); 9016 } 9017 9018 static dtrace_probe_t * 9019 dtrace_probe_lookup_id(dtrace_id_t id) 9020 { 9021 ASSERT(MUTEX_HELD(&dtrace_lock)); 9022 9023 if (id == 0 || id > dtrace_nprobes) 9024 return (NULL); 9025 9026 return (dtrace_probes[id - 1]); 9027 } 9028 9029 static int 9030 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 9031 { 9032 *((dtrace_id_t *)arg) = probe->dtpr_id; 9033 9034 return (DTRACE_MATCH_DONE); 9035 } 9036 9037 /* 9038 * Look up a probe based on provider and one or more of module name, function 9039 * name and probe name. 9040 */ 9041 dtrace_id_t 9042 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 9043 char *func, char *name) 9044 { 9045 dtrace_probekey_t pkey; 9046 dtrace_id_t id; 9047 int match; 9048 9049 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 9050 pkey.dtpk_pmatch = &dtrace_match_string; 9051 pkey.dtpk_mod = mod; 9052 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 9053 pkey.dtpk_func = func; 9054 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 9055 pkey.dtpk_name = name; 9056 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 9057 pkey.dtpk_id = DTRACE_IDNONE; 9058 9059 mutex_enter(&dtrace_lock); 9060 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 9061 dtrace_probe_lookup_match, &id); 9062 mutex_exit(&dtrace_lock); 9063 9064 ASSERT(match == 1 || match == 0); 9065 return (match ? id : 0); 9066 } 9067 9068 /* 9069 * Returns the probe argument associated with the specified probe. 9070 */ 9071 void * 9072 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 9073 { 9074 dtrace_probe_t *probe; 9075 void *rval = NULL; 9076 9077 mutex_enter(&dtrace_lock); 9078 9079 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 9080 probe->dtpr_provider == (dtrace_provider_t *)id) 9081 rval = probe->dtpr_arg; 9082 9083 mutex_exit(&dtrace_lock); 9084 9085 return (rval); 9086 } 9087 9088 /* 9089 * Copy a probe into a probe description. 9090 */ 9091 static void 9092 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 9093 { 9094 bzero(pdp, sizeof (dtrace_probedesc_t)); 9095 pdp->dtpd_id = prp->dtpr_id; 9096 9097 (void) strncpy(pdp->dtpd_provider, 9098 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 9099 9100 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 9101 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 9102 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 9103 } 9104 9105 /* 9106 * Called to indicate that a probe -- or probes -- should be provided by a 9107 * specified provider. If the specified description is NULL, the provider will 9108 * be told to provide all of its probes. (This is done whenever a new 9109 * consumer comes along, or whenever a retained enabling is to be matched.) If 9110 * the specified description is non-NULL, the provider is given the 9111 * opportunity to dynamically provide the specified probe, allowing providers 9112 * to support the creation of probes on-the-fly. (So-called _autocreated_ 9113 * probes.) If the provider is NULL, the operations will be applied to all 9114 * providers; if the provider is non-NULL the operations will only be applied 9115 * to the specified provider. The dtrace_provider_lock must be held, and the 9116 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 9117 * will need to grab the dtrace_lock when it reenters the framework through 9118 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 9119 */ 9120 static void 9121 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 9122 { 9123 #ifdef illumos 9124 modctl_t *ctl; 9125 #endif 9126 int all = 0; 9127 9128 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9129 9130 if (prv == NULL) { 9131 all = 1; 9132 prv = dtrace_provider; 9133 } 9134 9135 do { 9136 /* 9137 * First, call the blanket provide operation. 9138 */ 9139 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 9140 9141 #ifdef illumos 9142 /* 9143 * Now call the per-module provide operation. We will grab 9144 * mod_lock to prevent the list from being modified. Note 9145 * that this also prevents the mod_busy bits from changing. 9146 * (mod_busy can only be changed with mod_lock held.)
9147 */ 9148 mutex_enter(&mod_lock); 9149 9150 ctl = &modules; 9151 do { 9152 if (ctl->mod_busy || ctl->mod_mp == NULL) 9153 continue; 9154 9155 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 9156 9157 } while ((ctl = ctl->mod_next) != &modules); 9158 9159 mutex_exit(&mod_lock); 9160 #endif 9161 } while (all && (prv = prv->dtpv_next) != NULL); 9162 } 9163 9164 #ifdef illumos 9165 /* 9166 * Iterate over each probe, and call the Framework-to-Provider API function 9167 * denoted by offs. 9168 */ 9169 static void 9170 dtrace_probe_foreach(uintptr_t offs) 9171 { 9172 dtrace_provider_t *prov; 9173 void (*func)(void *, dtrace_id_t, void *); 9174 dtrace_probe_t *probe; 9175 dtrace_icookie_t cookie; 9176 int i; 9177 9178 /* 9179 * We disable interrupts to walk through the probe array. This is 9180 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 9181 * won't see stale data. 9182 */ 9183 cookie = dtrace_interrupt_disable(); 9184 9185 for (i = 0; i < dtrace_nprobes; i++) { 9186 if ((probe = dtrace_probes[i]) == NULL) 9187 continue; 9188 9189 if (probe->dtpr_ecb == NULL) { 9190 /* 9191 * This probe isn't enabled -- don't call the function. 9192 */ 9193 continue; 9194 } 9195 9196 prov = probe->dtpr_provider; 9197 func = *((void(**)(void *, dtrace_id_t, void *)) 9198 ((uintptr_t)&prov->dtpv_pops + offs)); 9199 9200 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 9201 } 9202 9203 dtrace_interrupt_enable(cookie); 9204 } 9205 #endif 9206 9207 static int 9208 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 9209 { 9210 dtrace_probekey_t pkey; 9211 uint32_t priv; 9212 uid_t uid; 9213 zoneid_t zoneid; 9214 9215 ASSERT(MUTEX_HELD(&dtrace_lock)); 9216 dtrace_ecb_create_cache = NULL; 9217 9218 if (desc == NULL) { 9219 /* 9220 * If we're passed a NULL description, we're being asked to 9221 * create an ECB with a NULL probe. 
9222 */ 9223 (void) dtrace_ecb_create_enable(NULL, enab); 9224 return (0); 9225 } 9226 9227 dtrace_probekey(desc, &pkey); 9228 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 9229 &priv, &uid, &zoneid); 9230 9231 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 9232 enab)); 9233 } 9234 9235 /* 9236 * DTrace Helper Provider Functions 9237 */ 9238 static void 9239 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 9240 { 9241 attr->dtat_name = DOF_ATTR_NAME(dofattr); 9242 attr->dtat_data = DOF_ATTR_DATA(dofattr); 9243 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 9244 } 9245 9246 static void 9247 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 9248 const dof_provider_t *dofprov, char *strtab) 9249 { 9250 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 9251 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 9252 dofprov->dofpv_provattr); 9253 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 9254 dofprov->dofpv_modattr); 9255 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 9256 dofprov->dofpv_funcattr); 9257 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 9258 dofprov->dofpv_nameattr); 9259 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 9260 dofprov->dofpv_argsattr); 9261 } 9262 9263 static void 9264 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9265 { 9266 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9267 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9268 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 9269 dof_provider_t *provider; 9270 dof_probe_t *probe; 9271 uint32_t *off, *enoff; 9272 uint8_t *arg; 9273 char *strtab; 9274 uint_t i, nprobes; 9275 dtrace_helper_provdesc_t dhpv; 9276 dtrace_helper_probedesc_t dhpb; 9277 dtrace_meta_t *meta = dtrace_meta_pid; 9278 dtrace_mops_t *mops = &meta->dtm_mops; 9279 void *parg; 9280 9281 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9282 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9283 provider->dofpv_strtab * dof->dofh_secsize); 9284 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9285 provider->dofpv_probes * dof->dofh_secsize); 9286 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9287 provider->dofpv_prargs * dof->dofh_secsize); 9288 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9289 provider->dofpv_proffs * dof->dofh_secsize); 9290 9291 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9292 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 9293 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 9294 enoff = NULL; 9295 9296 /* 9297 * See dtrace_helper_provider_validate(). 9298 */ 9299 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 9300 provider->dofpv_prenoffs != DOF_SECT_NONE) { 9301 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9302 provider->dofpv_prenoffs * dof->dofh_secsize); 9303 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 9304 } 9305 9306 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 9307 9308 /* 9309 * Create the provider. 9310 */ 9311 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9312 9313 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 9314 return; 9315 9316 meta->dtm_count++; 9317 9318 /* 9319 * Create the probes. 
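 * Each dof_probe_t lives at prb_sec->dofs_offset plus i times the
 * section's entry size; its function and name fields are offsets into
 * strtab, and its offset and argument indices select into the off[],
 * enoff[] and arg[] arrays resolved above.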
9320 */ 9321 for (i = 0; i < nprobes; i++) { 9322 probe = (dof_probe_t *)(uintptr_t)(daddr + 9323 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 9324 9325 dhpb.dthpb_mod = dhp->dofhp_mod; 9326 dhpb.dthpb_func = strtab + probe->dofpr_func; 9327 dhpb.dthpb_name = strtab + probe->dofpr_name; 9328 dhpb.dthpb_base = probe->dofpr_addr; 9329 dhpb.dthpb_offs = off + probe->dofpr_offidx; 9330 dhpb.dthpb_noffs = probe->dofpr_noffs; 9331 if (enoff != NULL) { 9332 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 9333 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 9334 } else { 9335 dhpb.dthpb_enoffs = NULL; 9336 dhpb.dthpb_nenoffs = 0; 9337 } 9338 dhpb.dthpb_args = arg + probe->dofpr_argidx; 9339 dhpb.dthpb_nargc = probe->dofpr_nargc; 9340 dhpb.dthpb_xargc = probe->dofpr_xargc; 9341 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 9342 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 9343 9344 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 9345 } 9346 } 9347 9348 static void 9349 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 9350 { 9351 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9352 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9353 int i; 9354 9355 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9356 9357 for (i = 0; i < dof->dofh_secnum; i++) { 9358 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9359 dof->dofh_secoff + i * dof->dofh_secsize); 9360 9361 if (sec->dofs_type != DOF_SECT_PROVIDER) 9362 continue; 9363 9364 dtrace_helper_provide_one(dhp, sec, pid); 9365 } 9366 9367 /* 9368 * We may have just created probes, so we must now rematch against 9369 * any retained enablings. Note that this call will acquire both 9370 * cpu_lock and dtrace_lock; the fact that we are holding 9371 * dtrace_meta_lock now is what defines the ordering with respect to 9372 * these three locks. 9373 */ 9374 dtrace_enabling_matchall(); 9375 } 9376 9377 static void 9378 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 9379 { 9380 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9381 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9382 dof_sec_t *str_sec; 9383 dof_provider_t *provider; 9384 char *strtab; 9385 dtrace_helper_provdesc_t dhpv; 9386 dtrace_meta_t *meta = dtrace_meta_pid; 9387 dtrace_mops_t *mops = &meta->dtm_mops; 9388 9389 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 9390 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 9391 provider->dofpv_strtab * dof->dofh_secsize); 9392 9393 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 9394 9395 /* 9396 * Create the provider. 9397 */ 9398 dtrace_dofprov2hprov(&dhpv, provider, strtab); 9399 9400 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 9401 9402 meta->dtm_count--; 9403 } 9404 9405 static void 9406 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 9407 { 9408 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 9409 dof_hdr_t *dof = (dof_hdr_t *)daddr; 9410 int i; 9411 9412 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 9413 9414 for (i = 0; i < dof->dofh_secnum; i++) { 9415 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 9416 dof->dofh_secoff + i * dof->dofh_secsize); 9417 9418 if (sec->dofs_type != DOF_SECT_PROVIDER) 9419 continue; 9420 9421 dtrace_helper_provider_remove_one(dhp, sec, pid); 9422 } 9423 } 9424 9425 /* 9426 * DTrace Meta Provider-to-Framework API Functions 9427 * 9428 * These functions implement the Meta Provider-to-Framework API, as described 9429 * in <sys/dtrace.h>. 
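 *
 * A user-land meta provider registers a dtrace_mops_t and is then handed
 * any deferred helper providers. A hedged sketch with hypothetical names
 * (the pid provider's fasttrap module is the canonical client):
 *
 *	static dtrace_mops_t my_mops = {
 *		.dtms_create_probe = my_create_probe,
 *		.dtms_provide_pid = my_provide_pid,
 *		.dtms_remove_pid = my_remove_pid
 *	};
 *	static dtrace_meta_provider_id_t my_meta_id;
 *
 *	error = dtrace_meta_register("my_meta", &my_mops, NULL, &my_meta_id);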
9430 */ 9431 int 9432 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 9433 dtrace_meta_provider_id_t *idp) 9434 { 9435 dtrace_meta_t *meta; 9436 dtrace_helpers_t *help, *next; 9437 int i; 9438 9439 *idp = DTRACE_METAPROVNONE; 9440 9441 /* 9442 * We strictly don't need the name, but we hold onto it for 9443 * debuggability. All hail error queues! 9444 */ 9445 if (name == NULL) { 9446 cmn_err(CE_WARN, "failed to register meta-provider: " 9447 "invalid name"); 9448 return (EINVAL); 9449 } 9450 9451 if (mops == NULL || 9452 mops->dtms_create_probe == NULL || 9453 mops->dtms_provide_pid == NULL || 9454 mops->dtms_remove_pid == NULL) { 9455 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9456 "invalid ops", name); 9457 return (EINVAL); 9458 } 9459 9460 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 9461 meta->dtm_mops = *mops; 9462 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 9463 (void) strcpy(meta->dtm_name, name); 9464 meta->dtm_arg = arg; 9465 9466 mutex_enter(&dtrace_meta_lock); 9467 mutex_enter(&dtrace_lock); 9468 9469 if (dtrace_meta_pid != NULL) { 9470 mutex_exit(&dtrace_lock); 9471 mutex_exit(&dtrace_meta_lock); 9472 cmn_err(CE_WARN, "failed to register meta-provider %s: " 9473 "user-land meta-provider exists", name); 9474 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 9475 kmem_free(meta, sizeof (dtrace_meta_t)); 9476 return (EINVAL); 9477 } 9478 9479 dtrace_meta_pid = meta; 9480 *idp = (dtrace_meta_provider_id_t)meta; 9481 9482 /* 9483 * If there are providers and probes ready to go, pass them 9484 * off to the new meta provider now. 9485 */ 9486 9487 help = dtrace_deferred_pid; 9488 dtrace_deferred_pid = NULL; 9489 9490 mutex_exit(&dtrace_lock); 9491 9492 while (help != NULL) { 9493 for (i = 0; i < help->dthps_nprovs; i++) { 9494 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 9495 help->dthps_pid); 9496 } 9497 9498 next = help->dthps_next; 9499 help->dthps_next = NULL; 9500 help->dthps_prev = NULL; 9501 help->dthps_deferred = 0; 9502 help = next; 9503 } 9504 9505 mutex_exit(&dtrace_meta_lock); 9506 9507 return (0); 9508 } 9509 9510 int 9511 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 9512 { 9513 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 9514 9515 mutex_enter(&dtrace_meta_lock); 9516 mutex_enter(&dtrace_lock); 9517 9518 if (old == dtrace_meta_pid) { 9519 pp = &dtrace_meta_pid; 9520 } else { 9521 panic("attempt to unregister non-existent " 9522 "dtrace meta-provider %p\n", (void *)old); 9523 } 9524 9525 if (old->dtm_count != 0) { 9526 mutex_exit(&dtrace_lock); 9527 mutex_exit(&dtrace_meta_lock); 9528 return (EBUSY); 9529 } 9530 9531 *pp = NULL; 9532 9533 mutex_exit(&dtrace_lock); 9534 mutex_exit(&dtrace_meta_lock); 9535 9536 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 9537 kmem_free(old, sizeof (dtrace_meta_t)); 9538 9539 return (0); 9540 } 9541 9542 9543 /* 9544 * DTrace DIF Object Functions 9545 */ 9546 static int 9547 dtrace_difo_err(uint_t pc, const char *format, ...) 9548 { 9549 if (dtrace_err_verbose) { 9550 va_list alist; 9551 9552 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 9553 va_start(alist, format); 9554 (void) vuprintf(format, alist); 9555 va_end(alist); 9556 } 9557 9558 #ifdef DTRACE_ERRDEBUG 9559 dtrace_errdebug(format); 9560 #endif 9561 return (1); 9562 } 9563 9564 /* 9565 * Validate a DTrace DIF object by checking the IR instructions. The following 9566 * rules are currently enforced by dtrace_difo_validate(): 9567 * 9568 * 1.
Each instruction must have a valid opcode 9569 * 2. Each register, string, variable, or subroutine reference must be valid 9570 * 3. No instruction can modify register %r0 (must be zero) 9571 * 4. All instruction reserved bits must be set to zero 9572 * 5. The last instruction must be a "ret" instruction 9573 * 6. All branch targets must reference a valid instruction _after_ the branch 9574 */ 9575 static int 9576 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 9577 cred_t *cr) 9578 { 9579 int err = 0, i; 9580 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 9581 int kcheckload; 9582 uint_t pc; 9583 9584 kcheckload = cr == NULL || 9585 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 9586 9587 dp->dtdo_destructive = 0; 9588 9589 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 9590 dif_instr_t instr = dp->dtdo_buf[pc]; 9591 9592 uint_t r1 = DIF_INSTR_R1(instr); 9593 uint_t r2 = DIF_INSTR_R2(instr); 9594 uint_t rd = DIF_INSTR_RD(instr); 9595 uint_t rs = DIF_INSTR_RS(instr); 9596 uint_t label = DIF_INSTR_LABEL(instr); 9597 uint_t v = DIF_INSTR_VAR(instr); 9598 uint_t subr = DIF_INSTR_SUBR(instr); 9599 uint_t type = DIF_INSTR_TYPE(instr); 9600 uint_t op = DIF_INSTR_OP(instr); 9601 9602 switch (op) { 9603 case DIF_OP_OR: 9604 case DIF_OP_XOR: 9605 case DIF_OP_AND: 9606 case DIF_OP_SLL: 9607 case DIF_OP_SRL: 9608 case DIF_OP_SRA: 9609 case DIF_OP_SUB: 9610 case DIF_OP_ADD: 9611 case DIF_OP_MUL: 9612 case DIF_OP_SDIV: 9613 case DIF_OP_UDIV: 9614 case DIF_OP_SREM: 9615 case DIF_OP_UREM: 9616 case DIF_OP_COPYS: 9617 if (r1 >= nregs) 9618 err += efunc(pc, "invalid register %u\n", r1); 9619 if (r2 >= nregs) 9620 err += efunc(pc, "invalid register %u\n", r2); 9621 if (rd >= nregs) 9622 err += efunc(pc, "invalid register %u\n", rd); 9623 if (rd == 0) 9624 err += efunc(pc, "cannot write to %r0\n"); 9625 break; 9626 case DIF_OP_NOT: 9627 case DIF_OP_MOV: 9628 case DIF_OP_ALLOCS: 9629 if (r1 >= nregs) 9630 err += efunc(pc, "invalid register %u\n", r1); 9631 if (r2 != 0) 9632 err += efunc(pc, "non-zero reserved bits\n"); 9633 if (rd >= nregs) 9634 err += efunc(pc, "invalid register %u\n", rd); 9635 if (rd == 0) 9636 err += efunc(pc, "cannot write to %r0\n"); 9637 break; 9638 case DIF_OP_LDSB: 9639 case DIF_OP_LDSH: 9640 case DIF_OP_LDSW: 9641 case DIF_OP_LDUB: 9642 case DIF_OP_LDUH: 9643 case DIF_OP_LDUW: 9644 case DIF_OP_LDX: 9645 if (r1 >= nregs) 9646 err += efunc(pc, "invalid register %u\n", r1); 9647 if (r2 != 0) 9648 err += efunc(pc, "non-zero reserved bits\n"); 9649 if (rd >= nregs) 9650 err += efunc(pc, "invalid register %u\n", rd); 9651 if (rd == 0) 9652 err += efunc(pc, "cannot write to %r0\n"); 9653 if (kcheckload) 9654 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 9655 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 9656 break; 9657 case DIF_OP_RLDSB: 9658 case DIF_OP_RLDSH: 9659 case DIF_OP_RLDSW: 9660 case DIF_OP_RLDUB: 9661 case DIF_OP_RLDUH: 9662 case DIF_OP_RLDUW: 9663 case DIF_OP_RLDX: 9664 if (r1 >= nregs) 9665 err += efunc(pc, "invalid register %u\n", r1); 9666 if (r2 != 0) 9667 err += efunc(pc, "non-zero reserved bits\n"); 9668 if (rd >= nregs) 9669 err += efunc(pc, "invalid register %u\n", rd); 9670 if (rd == 0) 9671 err += efunc(pc, "cannot write to %r0\n"); 9672 break; 9673 case DIF_OP_ULDSB: 9674 case DIF_OP_ULDSH: 9675 case DIF_OP_ULDSW: 9676 case DIF_OP_ULDUB: 9677 case DIF_OP_ULDUH: 9678 case DIF_OP_ULDUW: 9679 case DIF_OP_ULDX: 9680 if (r1 >= nregs) 9681 err += efunc(pc, "invalid register %u\n", r1); 9682 if (r2 != 0) 9683 err += 
efunc(pc, "non-zero reserved bits\n"); 9684 if (rd >= nregs) 9685 err += efunc(pc, "invalid register %u\n", rd); 9686 if (rd == 0) 9687 err += efunc(pc, "cannot write to %r0\n"); 9688 break; 9689 case DIF_OP_STB: 9690 case DIF_OP_STH: 9691 case DIF_OP_STW: 9692 case DIF_OP_STX: 9693 if (r1 >= nregs) 9694 err += efunc(pc, "invalid register %u\n", r1); 9695 if (r2 != 0) 9696 err += efunc(pc, "non-zero reserved bits\n"); 9697 if (rd >= nregs) 9698 err += efunc(pc, "invalid register %u\n", rd); 9699 if (rd == 0) 9700 err += efunc(pc, "cannot write to 0 address\n"); 9701 break; 9702 case DIF_OP_CMP: 9703 case DIF_OP_SCMP: 9704 if (r1 >= nregs) 9705 err += efunc(pc, "invalid register %u\n", r1); 9706 if (r2 >= nregs) 9707 err += efunc(pc, "invalid register %u\n", r2); 9708 if (rd != 0) 9709 err += efunc(pc, "non-zero reserved bits\n"); 9710 break; 9711 case DIF_OP_TST: 9712 if (r1 >= nregs) 9713 err += efunc(pc, "invalid register %u\n", r1); 9714 if (r2 != 0 || rd != 0) 9715 err += efunc(pc, "non-zero reserved bits\n"); 9716 break; 9717 case DIF_OP_BA: 9718 case DIF_OP_BE: 9719 case DIF_OP_BNE: 9720 case DIF_OP_BG: 9721 case DIF_OP_BGU: 9722 case DIF_OP_BGE: 9723 case DIF_OP_BGEU: 9724 case DIF_OP_BL: 9725 case DIF_OP_BLU: 9726 case DIF_OP_BLE: 9727 case DIF_OP_BLEU: 9728 if (label >= dp->dtdo_len) { 9729 err += efunc(pc, "invalid branch target %u\n", 9730 label); 9731 } 9732 if (label <= pc) { 9733 err += efunc(pc, "backward branch to %u\n", 9734 label); 9735 } 9736 break; 9737 case DIF_OP_RET: 9738 if (r1 != 0 || r2 != 0) 9739 err += efunc(pc, "non-zero reserved bits\n"); 9740 if (rd >= nregs) 9741 err += efunc(pc, "invalid register %u\n", rd); 9742 break; 9743 case DIF_OP_NOP: 9744 case DIF_OP_POPTS: 9745 case DIF_OP_FLUSHTS: 9746 if (r1 != 0 || r2 != 0 || rd != 0) 9747 err += efunc(pc, "non-zero reserved bits\n"); 9748 break; 9749 case DIF_OP_SETX: 9750 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 9751 err += efunc(pc, "invalid integer ref %u\n", 9752 DIF_INSTR_INTEGER(instr)); 9753 } 9754 if (rd >= nregs) 9755 err += efunc(pc, "invalid register %u\n", rd); 9756 if (rd == 0) 9757 err += efunc(pc, "cannot write to %r0\n"); 9758 break; 9759 case DIF_OP_SETS: 9760 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 9761 err += efunc(pc, "invalid string ref %u\n", 9762 DIF_INSTR_STRING(instr)); 9763 } 9764 if (rd >= nregs) 9765 err += efunc(pc, "invalid register %u\n", rd); 9766 if (rd == 0) 9767 err += efunc(pc, "cannot write to %r0\n"); 9768 break; 9769 case DIF_OP_LDGA: 9770 case DIF_OP_LDTA: 9771 if (r1 > DIF_VAR_ARRAY_MAX) 9772 err += efunc(pc, "invalid array %u\n", r1); 9773 if (r2 >= nregs) 9774 err += efunc(pc, "invalid register %u\n", r2); 9775 if (rd >= nregs) 9776 err += efunc(pc, "invalid register %u\n", rd); 9777 if (rd == 0) 9778 err += efunc(pc, "cannot write to %r0\n"); 9779 break; 9780 case DIF_OP_LDGS: 9781 case DIF_OP_LDTS: 9782 case DIF_OP_LDLS: 9783 case DIF_OP_LDGAA: 9784 case DIF_OP_LDTAA: 9785 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 9786 err += efunc(pc, "invalid variable %u\n", v); 9787 if (rd >= nregs) 9788 err += efunc(pc, "invalid register %u\n", rd); 9789 if (rd == 0) 9790 err += efunc(pc, "cannot write to %r0\n"); 9791 break; 9792 case DIF_OP_STGS: 9793 case DIF_OP_STTS: 9794 case DIF_OP_STLS: 9795 case DIF_OP_STGAA: 9796 case DIF_OP_STTAA: 9797 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 9798 err += efunc(pc, "invalid variable %u\n", v); 9799 if (rs >= nregs) 9800 err += efunc(pc, "invalid register %u\n", rd); 9801 break; 9802 case 
DIF_OP_CALL: 9803 if (subr > DIF_SUBR_MAX) 9804 err += efunc(pc, "invalid subr %u\n", subr); 9805 if (rd >= nregs) 9806 err += efunc(pc, "invalid register %u\n", rd); 9807 if (rd == 0) 9808 err += efunc(pc, "cannot write to %r0\n"); 9809 9810 if (subr == DIF_SUBR_COPYOUT || 9811 subr == DIF_SUBR_COPYOUTSTR) { 9812 dp->dtdo_destructive = 1; 9813 } 9814 9815 if (subr == DIF_SUBR_GETF) { 9816 /* 9817 * If we have a getf() we need to record that 9818 * in our state. Note that our state can be 9819 * NULL if this is a helper -- but in that 9820 * case, the call to getf() is itself illegal, 9821 * and will be caught (slightly later) when 9822 * the helper is validated. 9823 */ 9824 if (vstate->dtvs_state != NULL) 9825 vstate->dtvs_state->dts_getf++; 9826 } 9827 9828 break; 9829 case DIF_OP_PUSHTR: 9830 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 9831 err += efunc(pc, "invalid ref type %u\n", type); 9832 if (r2 >= nregs) 9833 err += efunc(pc, "invalid register %u\n", r2); 9834 if (rs >= nregs) 9835 err += efunc(pc, "invalid register %u\n", rs); 9836 break; 9837 case DIF_OP_PUSHTV: 9838 if (type != DIF_TYPE_CTF) 9839 err += efunc(pc, "invalid val type %u\n", type); 9840 if (r2 >= nregs) 9841 err += efunc(pc, "invalid register %u\n", r2); 9842 if (rs >= nregs) 9843 err += efunc(pc, "invalid register %u\n", rs); 9844 break; 9845 default: 9846 err += efunc(pc, "invalid opcode %u\n", 9847 DIF_INSTR_OP(instr)); 9848 } 9849 } 9850 9851 if (dp->dtdo_len != 0 && 9852 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 9853 err += efunc(dp->dtdo_len - 1, 9854 "expected 'ret' as last DIF instruction\n"); 9855 } 9856 9857 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { 9858 /* 9859 * If we're not returning by reference, the size must be either 9860 * 0 or the size of one of the base types. 9861 */ 9862 switch (dp->dtdo_rtype.dtdt_size) { 9863 case 0: 9864 case sizeof (uint8_t): 9865 case sizeof (uint16_t): 9866 case sizeof (uint32_t): 9867 case sizeof (uint64_t): 9868 break; 9869 9870 default: 9871 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 9872 } 9873 } 9874 9875 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 9876 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 9877 dtrace_diftype_t *vt, *et; 9878 uint_t id, ndx; 9879 9880 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 9881 v->dtdv_scope != DIFV_SCOPE_THREAD && 9882 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 9883 err += efunc(i, "unrecognized variable scope %d\n", 9884 v->dtdv_scope); 9885 break; 9886 } 9887 9888 if (v->dtdv_kind != DIFV_KIND_ARRAY && 9889 v->dtdv_kind != DIFV_KIND_SCALAR) { 9890 err += efunc(i, "unrecognized variable type %d\n", 9891 v->dtdv_kind); 9892 break; 9893 } 9894 9895 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 9896 err += efunc(i, "%d exceeds variable id limit\n", id); 9897 break; 9898 } 9899 9900 if (id < DIF_VAR_OTHER_UBASE) 9901 continue; 9902 9903 /* 9904 * For user-defined variables, we need to check that this 9905 * definition is identical to any previous definition that we 9906 * encountered. 
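 * For example, if one DIF object in this state declared the variable as
 * a by-ref string and a later DIF object declares the same id as a
 * plain scalar, the "changed variable type flags" check below reports
 * the conflict rather than silently aliasing the storage.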
9907 */ 9908 ndx = id - DIF_VAR_OTHER_UBASE; 9909 9910 switch (v->dtdv_scope) { 9911 case DIFV_SCOPE_GLOBAL: 9912 if (ndx < vstate->dtvs_nglobals) { 9913 dtrace_statvar_t *svar; 9914 9915 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 9916 existing = &svar->dtsv_var; 9917 } 9918 9919 break; 9920 9921 case DIFV_SCOPE_THREAD: 9922 if (ndx < vstate->dtvs_ntlocals) 9923 existing = &vstate->dtvs_tlocals[ndx]; 9924 break; 9925 9926 case DIFV_SCOPE_LOCAL: 9927 if (ndx < vstate->dtvs_nlocals) { 9928 dtrace_statvar_t *svar; 9929 9930 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 9931 existing = &svar->dtsv_var; 9932 } 9933 9934 break; 9935 } 9936 9937 vt = &v->dtdv_type; 9938 9939 if (vt->dtdt_flags & DIF_TF_BYREF) { 9940 if (vt->dtdt_size == 0) { 9941 err += efunc(i, "zero-sized variable\n"); 9942 break; 9943 } 9944 9945 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL || 9946 v->dtdv_scope == DIFV_SCOPE_LOCAL) && 9947 vt->dtdt_size > dtrace_statvar_maxsize) { 9948 err += efunc(i, "oversized by-ref static\n"); 9949 break; 9950 } 9951 } 9952 9953 if (existing == NULL || existing->dtdv_id == 0) 9954 continue; 9955 9956 ASSERT(existing->dtdv_id == v->dtdv_id); 9957 ASSERT(existing->dtdv_scope == v->dtdv_scope); 9958 9959 if (existing->dtdv_kind != v->dtdv_kind) 9960 err += efunc(i, "%d changed variable kind\n", id); 9961 9962 et = &existing->dtdv_type; 9963 9964 if (vt->dtdt_flags != et->dtdt_flags) { 9965 err += efunc(i, "%d changed variable type flags\n", id); 9966 break; 9967 } 9968 9969 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 9970 err += efunc(i, "%d changed variable type size\n", id); 9971 break; 9972 } 9973 } 9974 9975 return (err); 9976 } 9977 9978 /* 9979 * Validate a DTrace DIF object that is to be used as a helper. Helpers 9980 * are much more constrained than normal DIFOs. Specifically, they may 9981 * not: 9982 * 9983 * 1. Make calls to subroutines other than copyin(), copyinstr() or 9984 * miscellaneous string routines 9985 * 2. Access DTrace variables other than the args[] array, and the 9986 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 9987 * 3. Have thread-local variables. 9988 * 4. Have dynamic variables. 9989 */ 9990 static int 9991 dtrace_difo_validate_helper(dtrace_difo_t *dp) 9992 { 9993 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 9994 int err = 0; 9995 uint_t pc; 9996 9997 for (pc = 0; pc < dp->dtdo_len; pc++) { 9998 dif_instr_t instr = dp->dtdo_buf[pc]; 9999 10000 uint_t v = DIF_INSTR_VAR(instr); 10001 uint_t subr = DIF_INSTR_SUBR(instr); 10002 uint_t op = DIF_INSTR_OP(instr); 10003 10004 switch (op) { 10005 case DIF_OP_OR: 10006 case DIF_OP_XOR: 10007 case DIF_OP_AND: 10008 case DIF_OP_SLL: 10009 case DIF_OP_SRL: 10010 case DIF_OP_SRA: 10011 case DIF_OP_SUB: 10012 case DIF_OP_ADD: 10013 case DIF_OP_MUL: 10014 case DIF_OP_SDIV: 10015 case DIF_OP_UDIV: 10016 case DIF_OP_SREM: 10017 case DIF_OP_UREM: 10018 case DIF_OP_COPYS: 10019 case DIF_OP_NOT: 10020 case DIF_OP_MOV: 10021 case DIF_OP_RLDSB: 10022 case DIF_OP_RLDSH: 10023 case DIF_OP_RLDSW: 10024 case DIF_OP_RLDUB: 10025 case DIF_OP_RLDUH: 10026 case DIF_OP_RLDUW: 10027 case DIF_OP_RLDX: 10028 case DIF_OP_ULDSB: 10029 case DIF_OP_ULDSH: 10030 case DIF_OP_ULDSW: 10031 case DIF_OP_ULDUB: 10032 case DIF_OP_ULDUH: 10033 case DIF_OP_ULDUW: 10034 case DIF_OP_ULDX: 10035 case DIF_OP_STB: 10036 case DIF_OP_STH: 10037 case DIF_OP_STW: 10038 case DIF_OP_STX: 10039 case DIF_OP_ALLOCS: 10040 case DIF_OP_CMP: 10041 case DIF_OP_SCMP: 10042 case DIF_OP_TST: 10043 case DIF_OP_BA: 10044 case DIF_OP_BE: 10045 case DIF_OP_BNE: 10046 case DIF_OP_BG: 10047 case DIF_OP_BGU: 10048 case DIF_OP_BGE: 10049 case DIF_OP_BGEU: 10050 case DIF_OP_BL: 10051 case DIF_OP_BLU: 10052 case DIF_OP_BLE: 10053 case DIF_OP_BLEU: 10054 case DIF_OP_RET: 10055 case DIF_OP_NOP: 10056 case DIF_OP_POPTS: 10057 case DIF_OP_FLUSHTS: 10058 case DIF_OP_SETX: 10059 case DIF_OP_SETS: 10060 case DIF_OP_LDGA: 10061 case DIF_OP_LDLS: 10062 case DIF_OP_STGS: 10063 case DIF_OP_STLS: 10064 case DIF_OP_PUSHTR: 10065 case DIF_OP_PUSHTV: 10066 break; 10067 10068 case DIF_OP_LDGS: 10069 if (v >= DIF_VAR_OTHER_UBASE) 10070 break; 10071 10072 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 10073 break; 10074 10075 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 10076 v == DIF_VAR_PPID || v == DIF_VAR_TID || 10077 v == DIF_VAR_EXECARGS || 10078 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 10079 v == DIF_VAR_UID || v == DIF_VAR_GID) 10080 break; 10081 10082 err += efunc(pc, "illegal variable %u\n", v); 10083 break; 10084 10085 case DIF_OP_LDTA: 10086 case DIF_OP_LDTS: 10087 case DIF_OP_LDGAA: 10088 case DIF_OP_LDTAA: 10089 err += efunc(pc, "illegal dynamic variable load\n"); 10090 break; 10091 10092 case DIF_OP_STTS: 10093 case DIF_OP_STGAA: 10094 case DIF_OP_STTAA: 10095 err += efunc(pc, "illegal dynamic variable store\n"); 10096 break; 10097 10098 case DIF_OP_CALL: 10099 if (subr == DIF_SUBR_ALLOCA || 10100 subr == DIF_SUBR_BCOPY || 10101 subr == DIF_SUBR_COPYIN || 10102 subr == DIF_SUBR_COPYINTO || 10103 subr == DIF_SUBR_COPYINSTR || 10104 subr == DIF_SUBR_INDEX || 10105 subr == DIF_SUBR_INET_NTOA || 10106 subr == DIF_SUBR_INET_NTOA6 || 10107 subr == DIF_SUBR_INET_NTOP || 10108 subr == DIF_SUBR_JSON || 10109 subr == DIF_SUBR_LLTOSTR || 10110 subr == DIF_SUBR_STRTOLL || 10111 subr == DIF_SUBR_RINDEX || 10112 subr == DIF_SUBR_STRCHR || 10113 subr == DIF_SUBR_STRJOIN || 10114 subr == DIF_SUBR_STRRCHR || 10115 subr == DIF_SUBR_STRSTR || 10116 subr == DIF_SUBR_HTONS || 10117 subr == DIF_SUBR_HTONL || 10118 subr == DIF_SUBR_HTONLL || 10119 subr == DIF_SUBR_NTOHS || 10120 subr == DIF_SUBR_NTOHL || 10121 subr == DIF_SUBR_NTOHLL || 10122 subr == DIF_SUBR_MEMREF || 10123 #ifndef illumos 10124 subr == DIF_SUBR_MEMSTR || 10125 #endif 10126 subr == DIF_SUBR_TYPEREF) 10127 break; 10128 10129 err += efunc(pc, "invalid 
subr %u\n", subr); 10130 break; 10131 10132 default: 10133 err += efunc(pc, "invalid opcode %u\n", 10134 DIF_INSTR_OP(instr)); 10135 } 10136 } 10137 10138 return (err); 10139 } 10140 10141 /* 10142 * Returns 1 if the expression in the DIF object can be cached on a per-thread 10143 * basis; 0 if not. 10144 */ 10145 static int 10146 dtrace_difo_cacheable(dtrace_difo_t *dp) 10147 { 10148 int i; 10149 10150 if (dp == NULL) 10151 return (0); 10152 10153 for (i = 0; i < dp->dtdo_varlen; i++) { 10154 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10155 10156 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 10157 continue; 10158 10159 switch (v->dtdv_id) { 10160 case DIF_VAR_CURTHREAD: 10161 case DIF_VAR_PID: 10162 case DIF_VAR_TID: 10163 case DIF_VAR_EXECARGS: 10164 case DIF_VAR_EXECNAME: 10165 case DIF_VAR_ZONENAME: 10166 break; 10167 10168 default: 10169 return (0); 10170 } 10171 } 10172 10173 /* 10174 * This DIF object may be cacheable. Now we need to look for any 10175 * array loading instructions, any memory loading instructions, or 10176 * any stores to thread-local variables. 10177 */ 10178 for (i = 0; i < dp->dtdo_len; i++) { 10179 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 10180 10181 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 10182 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 10183 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 10184 op == DIF_OP_LDGA || op == DIF_OP_STTS) 10185 return (0); 10186 } 10187 10188 return (1); 10189 } 10190 10191 static void 10192 dtrace_difo_hold(dtrace_difo_t *dp) 10193 { 10194 int i; 10195 10196 ASSERT(MUTEX_HELD(&dtrace_lock)); 10197 10198 dp->dtdo_refcnt++; 10199 ASSERT(dp->dtdo_refcnt != 0); 10200 10201 /* 10202 * We need to check this DIF object for references to the variable 10203 * DIF_VAR_VTIMESTAMP. 10204 */ 10205 for (i = 0; i < dp->dtdo_varlen; i++) { 10206 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10207 10208 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10209 continue; 10210 10211 if (dtrace_vtime_references++ == 0) 10212 dtrace_vtime_enable(); 10213 } 10214 } 10215 10216 /* 10217 * This routine calculates the dynamic variable chunksize for a given DIF 10218 * object. The calculation is not fool-proof, and can probably be tricked by 10219 * malicious DIF -- but it works for all compiler-generated DIF. Because this 10220 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 10221 * if a dynamic variable size exceeds the chunksize. 
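 *
 * As a worked example of the arithmetic below: a thread-local store
 * (DIF_OP_STTS) uses two zero-sized keys, so its allocation is
 *
 *	size = sizeof (dtrace_dynvar_t)		[embeds the first key]
 *	     + sizeof (dtrace_key_t)		[the second key]
 *	     + 0				[ksize: both keys are size 0]
 *	     + v->dtdv_type.dtdt_size		[the stored data]
 *
 * rounded up to a multiple of sizeof (uint64_t).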
10222 */ 10223 static void 10224 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10225 { 10226 uint64_t sval = 0; 10227 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 10228 const dif_instr_t *text = dp->dtdo_buf; 10229 uint_t pc, srd = 0; 10230 uint_t ttop = 0; 10231 size_t size, ksize; 10232 uint_t id, i; 10233 10234 for (pc = 0; pc < dp->dtdo_len; pc++) { 10235 dif_instr_t instr = text[pc]; 10236 uint_t op = DIF_INSTR_OP(instr); 10237 uint_t rd = DIF_INSTR_RD(instr); 10238 uint_t r1 = DIF_INSTR_R1(instr); 10239 uint_t nkeys = 0; 10240 uchar_t scope = 0; 10241 10242 dtrace_key_t *key = tupregs; 10243 10244 switch (op) { 10245 case DIF_OP_SETX: 10246 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 10247 srd = rd; 10248 continue; 10249 10250 case DIF_OP_STTS: 10251 key = &tupregs[DIF_DTR_NREGS]; 10252 key[0].dttk_size = 0; 10253 key[1].dttk_size = 0; 10254 nkeys = 2; 10255 scope = DIFV_SCOPE_THREAD; 10256 break; 10257 10258 case DIF_OP_STGAA: 10259 case DIF_OP_STTAA: 10260 nkeys = ttop; 10261 10262 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 10263 key[nkeys++].dttk_size = 0; 10264 10265 key[nkeys++].dttk_size = 0; 10266 10267 if (op == DIF_OP_STTAA) { 10268 scope = DIFV_SCOPE_THREAD; 10269 } else { 10270 scope = DIFV_SCOPE_GLOBAL; 10271 } 10272 10273 break; 10274 10275 case DIF_OP_PUSHTR: 10276 if (ttop == DIF_DTR_NREGS) 10277 return; 10278 10279 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 10280 /* 10281 * If the register for the size of the "pushtr" 10282 * is %r0 (or the value is 0) and the type is 10283 * a string, we'll use the system-wide default 10284 * string size. 10285 */ 10286 tupregs[ttop++].dttk_size = 10287 dtrace_strsize_default; 10288 } else { 10289 if (srd == 0) 10290 return; 10291 10292 if (sval > LONG_MAX) 10293 return; 10294 10295 tupregs[ttop++].dttk_size = sval; 10296 } 10297 10298 break; 10299 10300 case DIF_OP_PUSHTV: 10301 if (ttop == DIF_DTR_NREGS) 10302 return; 10303 10304 tupregs[ttop++].dttk_size = 0; 10305 break; 10306 10307 case DIF_OP_FLUSHTS: 10308 ttop = 0; 10309 break; 10310 10311 case DIF_OP_POPTS: 10312 if (ttop != 0) 10313 ttop--; 10314 break; 10315 } 10316 10317 sval = 0; 10318 srd = 0; 10319 10320 if (nkeys == 0) 10321 continue; 10322 10323 /* 10324 * We have a dynamic variable allocation; calculate its size. 10325 */ 10326 for (ksize = 0, i = 0; i < nkeys; i++) 10327 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 10328 10329 size = sizeof (dtrace_dynvar_t); 10330 size += sizeof (dtrace_key_t) * (nkeys - 1); 10331 size += ksize; 10332 10333 /* 10334 * Now we need to determine the size of the stored data. 10335 */ 10336 id = DIF_INSTR_VAR(instr); 10337 10338 for (i = 0; i < dp->dtdo_varlen; i++) { 10339 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10340 10341 if (v->dtdv_id == id && v->dtdv_scope == scope) { 10342 size += v->dtdv_type.dtdt_size; 10343 break; 10344 } 10345 } 10346 10347 if (i == dp->dtdo_varlen) 10348 return; 10349 10350 /* 10351 * We have the size. If this is larger than the chunk size 10352 * for our dynamic variable state, reset the chunk size. 10353 */ 10354 size = P2ROUNDUP(size, sizeof (uint64_t)); 10355 10356 /* 10357 * Before setting the chunk size, check that we're not going 10358 * to set it to a negative value... 10359 */ 10360 if (size > LONG_MAX) 10361 return; 10362 10363 /* 10364 * ...and make certain that we didn't badly overflow. 
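* (On a 32-bit kernel, for instance, a corrupt dtdt_size near UINT32_MAX can wrap the additions above and leave "size" below "ksize" -- exactly the case rejected here.)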
10365 */ 10366 if (size < ksize || size < sizeof (dtrace_dynvar_t)) 10367 return; 10368 10369 if (size > vstate->dtvs_dynvars.dtds_chunksize) 10370 vstate->dtvs_dynvars.dtds_chunksize = size; 10371 } 10372 } 10373 10374 static void 10375 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10376 { 10377 int i, oldsvars, osz, nsz, otlocals, ntlocals; 10378 uint_t id; 10379 10380 ASSERT(MUTEX_HELD(&dtrace_lock)); 10381 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 10382 10383 for (i = 0; i < dp->dtdo_varlen; i++) { 10384 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10385 dtrace_statvar_t *svar, ***svarp = NULL; 10386 size_t dsize = 0; 10387 uint8_t scope = v->dtdv_scope; 10388 int *np = NULL; 10389 10390 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10391 continue; 10392 10393 id -= DIF_VAR_OTHER_UBASE; 10394 10395 switch (scope) { 10396 case DIFV_SCOPE_THREAD: 10397 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 10398 dtrace_difv_t *tlocals; 10399 10400 if ((ntlocals = (otlocals << 1)) == 0) 10401 ntlocals = 1; 10402 10403 osz = otlocals * sizeof (dtrace_difv_t); 10404 nsz = ntlocals * sizeof (dtrace_difv_t); 10405 10406 tlocals = kmem_zalloc(nsz, KM_SLEEP); 10407 10408 if (osz != 0) { 10409 bcopy(vstate->dtvs_tlocals, 10410 tlocals, osz); 10411 kmem_free(vstate->dtvs_tlocals, osz); 10412 } 10413 10414 vstate->dtvs_tlocals = tlocals; 10415 vstate->dtvs_ntlocals = ntlocals; 10416 } 10417 10418 vstate->dtvs_tlocals[id] = *v; 10419 continue; 10420 10421 case DIFV_SCOPE_LOCAL: 10422 np = &vstate->dtvs_nlocals; 10423 svarp = &vstate->dtvs_locals; 10424 10425 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10426 dsize = NCPU * (v->dtdv_type.dtdt_size + 10427 sizeof (uint64_t)); 10428 else 10429 dsize = NCPU * sizeof (uint64_t); 10430 10431 break; 10432 10433 case DIFV_SCOPE_GLOBAL: 10434 np = &vstate->dtvs_nglobals; 10435 svarp = &vstate->dtvs_globals; 10436 10437 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 10438 dsize = v->dtdv_type.dtdt_size + 10439 sizeof (uint64_t); 10440 10441 break; 10442 10443 default: 10444 ASSERT(0); 10445 } 10446 10447 while (id >= (oldsvars = *np)) { 10448 dtrace_statvar_t **statics; 10449 int newsvars, oldsize, newsize; 10450 10451 if ((newsvars = (oldsvars << 1)) == 0) 10452 newsvars = 1; 10453 10454 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 10455 newsize = newsvars * sizeof (dtrace_statvar_t *); 10456 10457 statics = kmem_zalloc(newsize, KM_SLEEP); 10458 10459 if (oldsize != 0) { 10460 bcopy(*svarp, statics, oldsize); 10461 kmem_free(*svarp, oldsize); 10462 } 10463 10464 *svarp = statics; 10465 *np = newsvars; 10466 } 10467 10468 if ((svar = (*svarp)[id]) == NULL) { 10469 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 10470 svar->dtsv_var = *v; 10471 10472 if ((svar->dtsv_size = dsize) != 0) { 10473 svar->dtsv_data = (uint64_t)(uintptr_t) 10474 kmem_zalloc(dsize, KM_SLEEP); 10475 } 10476 10477 (*svarp)[id] = svar; 10478 } 10479 10480 svar->dtsv_refcnt++; 10481 } 10482 10483 dtrace_difo_chunksize(dp, vstate); 10484 dtrace_difo_hold(dp); 10485 } 10486 10487 static dtrace_difo_t * 10488 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10489 { 10490 dtrace_difo_t *new; 10491 size_t sz; 10492 10493 ASSERT(dp->dtdo_buf != NULL); 10494 ASSERT(dp->dtdo_refcnt != 0); 10495 10496 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10497 10498 ASSERT(dp->dtdo_buf != NULL); 10499 sz = dp->dtdo_len * sizeof (dif_instr_t); 10500 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 10501 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 10502 new->dtdo_len = 
dp->dtdo_len; 10503 10504 if (dp->dtdo_strtab != NULL) { 10505 ASSERT(dp->dtdo_strlen != 0); 10506 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 10507 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 10508 new->dtdo_strlen = dp->dtdo_strlen; 10509 } 10510 10511 if (dp->dtdo_inttab != NULL) { 10512 ASSERT(dp->dtdo_intlen != 0); 10513 sz = dp->dtdo_intlen * sizeof (uint64_t); 10514 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 10515 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 10516 new->dtdo_intlen = dp->dtdo_intlen; 10517 } 10518 10519 if (dp->dtdo_vartab != NULL) { 10520 ASSERT(dp->dtdo_varlen != 0); 10521 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 10522 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 10523 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 10524 new->dtdo_varlen = dp->dtdo_varlen; 10525 } 10526 10527 dtrace_difo_init(new, vstate); 10528 return (new); 10529 } 10530 10531 static void 10532 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10533 { 10534 int i; 10535 10536 ASSERT(dp->dtdo_refcnt == 0); 10537 10538 for (i = 0; i < dp->dtdo_varlen; i++) { 10539 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10540 dtrace_statvar_t *svar, **svarp = NULL; 10541 uint_t id; 10542 uint8_t scope = v->dtdv_scope; 10543 int *np = NULL; 10544 10545 switch (scope) { 10546 case DIFV_SCOPE_THREAD: 10547 continue; 10548 10549 case DIFV_SCOPE_LOCAL: 10550 np = &vstate->dtvs_nlocals; 10551 svarp = vstate->dtvs_locals; 10552 break; 10553 10554 case DIFV_SCOPE_GLOBAL: 10555 np = &vstate->dtvs_nglobals; 10556 svarp = vstate->dtvs_globals; 10557 break; 10558 10559 default: 10560 ASSERT(0); 10561 } 10562 10563 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 10564 continue; 10565 10566 id -= DIF_VAR_OTHER_UBASE; 10567 ASSERT(id < *np); 10568 10569 svar = svarp[id]; 10570 ASSERT(svar != NULL); 10571 ASSERT(svar->dtsv_refcnt > 0); 10572 10573 if (--svar->dtsv_refcnt > 0) 10574 continue; 10575 10576 if (svar->dtsv_size != 0) { 10577 ASSERT(svar->dtsv_data != 0); 10578 kmem_free((void *)(uintptr_t)svar->dtsv_data, 10579 svar->dtsv_size); 10580 } 10581 10582 kmem_free(svar, sizeof (dtrace_statvar_t)); 10583 svarp[id] = NULL; 10584 } 10585 10586 if (dp->dtdo_buf != NULL) 10587 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10588 if (dp->dtdo_inttab != NULL) 10589 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10590 if (dp->dtdo_strtab != NULL) 10591 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10592 if (dp->dtdo_vartab != NULL) 10593 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10594 10595 kmem_free(dp, sizeof (dtrace_difo_t)); 10596 } 10597 10598 static void 10599 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 10600 { 10601 int i; 10602 10603 ASSERT(MUTEX_HELD(&dtrace_lock)); 10604 ASSERT(dp->dtdo_refcnt != 0); 10605 10606 for (i = 0; i < dp->dtdo_varlen; i++) { 10607 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10608 10609 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 10610 continue; 10611 10612 ASSERT(dtrace_vtime_references > 0); 10613 if (--dtrace_vtime_references == 0) 10614 dtrace_vtime_disable(); 10615 } 10616 10617 if (--dp->dtdo_refcnt == 0) 10618 dtrace_difo_destroy(dp, vstate); 10619 } 10620 10621 /* 10622 * DTrace Format Functions 10623 */ 10624 static uint16_t 10625 dtrace_format_add(dtrace_state_t *state, char *str) 10626 { 10627 char *fmt, **new; 10628 uint16_t ndx, len = strlen(str) + 1; 10629 10630 fmt = kmem_zalloc(len, KM_SLEEP); 10631 bcopy(str, fmt, len); 10632 10633 for (ndx = 0; ndx < 
state->dts_nformats; ndx++) { 10634 if (state->dts_formats[ndx] == NULL) { 10635 state->dts_formats[ndx] = fmt; 10636 return (ndx + 1); 10637 } 10638 } 10639 10640 if (state->dts_nformats == USHRT_MAX) { 10641 /* 10642 * This is only likely if a denial-of-service attack is being 10643 * attempted. As such, it's okay to fail silently here. 10644 */ 10645 kmem_free(fmt, len); 10646 return (0); 10647 } 10648 10649 /* 10650 * For simplicity, we always resize the formats array to be exactly the 10651 * number of formats. 10652 */ 10653 ndx = state->dts_nformats++; 10654 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 10655 10656 if (state->dts_formats != NULL) { 10657 ASSERT(ndx != 0); 10658 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 10659 kmem_free(state->dts_formats, ndx * sizeof (char *)); 10660 } 10661 10662 state->dts_formats = new; 10663 state->dts_formats[ndx] = fmt; 10664 10665 return (ndx + 1); 10666 } 10667 10668 static void 10669 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 10670 { 10671 char *fmt; 10672 10673 ASSERT(state->dts_formats != NULL); 10674 ASSERT(format <= state->dts_nformats); 10675 ASSERT(state->dts_formats[format - 1] != NULL); 10676 10677 fmt = state->dts_formats[format - 1]; 10678 kmem_free(fmt, strlen(fmt) + 1); 10679 state->dts_formats[format - 1] = NULL; 10680 } 10681 10682 static void 10683 dtrace_format_destroy(dtrace_state_t *state) 10684 { 10685 int i; 10686 10687 if (state->dts_nformats == 0) { 10688 ASSERT(state->dts_formats == NULL); 10689 return; 10690 } 10691 10692 ASSERT(state->dts_formats != NULL); 10693 10694 for (i = 0; i < state->dts_nformats; i++) { 10695 char *fmt = state->dts_formats[i]; 10696 10697 if (fmt == NULL) 10698 continue; 10699 10700 kmem_free(fmt, strlen(fmt) + 1); 10701 } 10702 10703 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 10704 state->dts_nformats = 0; 10705 state->dts_formats = NULL; 10706 } 10707 10708 /* 10709 * DTrace Predicate Functions 10710 */ 10711 static dtrace_predicate_t * 10712 dtrace_predicate_create(dtrace_difo_t *dp) 10713 { 10714 dtrace_predicate_t *pred; 10715 10716 ASSERT(MUTEX_HELD(&dtrace_lock)); 10717 ASSERT(dp->dtdo_refcnt != 0); 10718 10719 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 10720 pred->dtp_difo = dp; 10721 pred->dtp_refcnt = 1; 10722 10723 if (!dtrace_difo_cacheable(dp)) 10724 return (pred); 10725 10726 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 10727 /* 10728 * This is only theoretically possible -- we have had 2^32 10729 * cacheable predicates on this machine. We cannot allow any 10730 * more predicates to become cacheable: as unlikely as it is, 10731 * there may be a thread caching a (now stale) predicate cache 10732 * ID. 
(N.B.: the temptation is being successfully resisted to 10733 * have this cmn_err() "Holy shit -- we executed this code!") 10734 */ 10735 return (pred); 10736 } 10737 10738 pred->dtp_cacheid = dtrace_predcache_id++; 10739 10740 return (pred); 10741 } 10742 10743 static void 10744 dtrace_predicate_hold(dtrace_predicate_t *pred) 10745 { 10746 ASSERT(MUTEX_HELD(&dtrace_lock)); 10747 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 10748 ASSERT(pred->dtp_refcnt > 0); 10749 10750 pred->dtp_refcnt++; 10751 } 10752 10753 static void 10754 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 10755 { 10756 dtrace_difo_t *dp = pred->dtp_difo; 10757 10758 ASSERT(MUTEX_HELD(&dtrace_lock)); 10759 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 10760 ASSERT(pred->dtp_refcnt > 0); 10761 10762 if (--pred->dtp_refcnt == 0) { 10763 dtrace_difo_release(pred->dtp_difo, vstate); 10764 kmem_free(pred, sizeof (dtrace_predicate_t)); 10765 } 10766 } 10767 10768 /* 10769 * DTrace Action Description Functions 10770 */ 10771 static dtrace_actdesc_t * 10772 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 10773 uint64_t uarg, uint64_t arg) 10774 { 10775 dtrace_actdesc_t *act; 10776 10777 #ifdef illumos 10778 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 10779 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 10780 #endif 10781 10782 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 10783 act->dtad_kind = kind; 10784 act->dtad_ntuple = ntuple; 10785 act->dtad_uarg = uarg; 10786 act->dtad_arg = arg; 10787 act->dtad_refcnt = 1; 10788 10789 return (act); 10790 } 10791 10792 static void 10793 dtrace_actdesc_hold(dtrace_actdesc_t *act) 10794 { 10795 ASSERT(act->dtad_refcnt >= 1); 10796 act->dtad_refcnt++; 10797 } 10798 10799 static void 10800 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 10801 { 10802 dtrace_actkind_t kind = act->dtad_kind; 10803 dtrace_difo_t *dp; 10804 10805 ASSERT(act->dtad_refcnt >= 1); 10806 10807 if (--act->dtad_refcnt != 0) 10808 return; 10809 10810 if ((dp = act->dtad_difo) != NULL) 10811 dtrace_difo_release(dp, vstate); 10812 10813 if (DTRACEACT_ISPRINTFLIKE(kind)) { 10814 char *str = (char *)(uintptr_t)act->dtad_arg; 10815 10816 #ifdef illumos 10817 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 10818 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 10819 #endif 10820 10821 if (str != NULL) 10822 kmem_free(str, strlen(str) + 1); 10823 } 10824 10825 kmem_free(act, sizeof (dtrace_actdesc_t)); 10826 } 10827 10828 /* 10829 * DTrace ECB Functions 10830 */ 10831 static dtrace_ecb_t * 10832 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 10833 { 10834 dtrace_ecb_t *ecb; 10835 dtrace_epid_t epid; 10836 10837 ASSERT(MUTEX_HELD(&dtrace_lock)); 10838 10839 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 10840 ecb->dte_predicate = NULL; 10841 ecb->dte_probe = probe; 10842 10843 /* 10844 * The default size is the size of the default action: recording 10845 * the header. 
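* (A dtrace_rechdr_t is the four-byte EPID followed by the record timestamp stored as two 32-bit halves -- twelve bytes in all.)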
10846 */ 10847 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 10848 ecb->dte_alignment = sizeof (dtrace_epid_t); 10849 10850 epid = state->dts_epid++; 10851 10852 if (epid - 1 >= state->dts_necbs) { 10853 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 10854 int necbs = state->dts_necbs << 1; 10855 10856 ASSERT(epid == state->dts_necbs + 1); 10857 10858 if (necbs == 0) { 10859 ASSERT(oecbs == NULL); 10860 necbs = 1; 10861 } 10862 10863 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 10864 10865 if (oecbs != NULL) 10866 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 10867 10868 dtrace_membar_producer(); 10869 state->dts_ecbs = ecbs; 10870 10871 if (oecbs != NULL) { 10872 /* 10873 * If this state is active, we must dtrace_sync() 10874 * before we can free the old dts_ecbs array: we're 10875 * coming in hot, and there may be active ring 10876 * buffer processing (which indexes into the dts_ecbs 10877 * array) on another CPU. 10878 */ 10879 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 10880 dtrace_sync(); 10881 10882 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 10883 } 10884 10885 dtrace_membar_producer(); 10886 state->dts_necbs = necbs; 10887 } 10888 10889 ecb->dte_state = state; 10890 10891 ASSERT(state->dts_ecbs[epid - 1] == NULL); 10892 dtrace_membar_producer(); 10893 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 10894 10895 return (ecb); 10896 } 10897 10898 static void 10899 dtrace_ecb_enable(dtrace_ecb_t *ecb) 10900 { 10901 dtrace_probe_t *probe = ecb->dte_probe; 10902 10903 ASSERT(MUTEX_HELD(&cpu_lock)); 10904 ASSERT(MUTEX_HELD(&dtrace_lock)); 10905 ASSERT(ecb->dte_next == NULL); 10906 10907 if (probe == NULL) { 10908 /* 10909 * This is the NULL probe -- there's nothing to do. 10910 */ 10911 return; 10912 } 10913 10914 if (probe->dtpr_ecb == NULL) { 10915 dtrace_provider_t *prov = probe->dtpr_provider; 10916 10917 /* 10918 * We're the first ECB on this probe. 10919 */ 10920 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 10921 10922 if (ecb->dte_predicate != NULL) 10923 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 10924 10925 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 10926 probe->dtpr_id, probe->dtpr_arg); 10927 } else { 10928 /* 10929 * This probe is already active. Swing the last pointer to 10930 * point to the new ECB, and issue a dtrace_sync() to assure 10931 * that all CPUs have seen the change. 10932 */ 10933 ASSERT(probe->dtpr_ecb_last != NULL); 10934 probe->dtpr_ecb_last->dte_next = ecb; 10935 probe->dtpr_ecb_last = ecb; 10936 probe->dtpr_predcache = 0; 10937 10938 dtrace_sync(); 10939 } 10940 } 10941 10942 static void 10943 dtrace_ecb_resize(dtrace_ecb_t *ecb) 10944 { 10945 dtrace_action_t *act; 10946 uint32_t curneeded = UINT32_MAX; 10947 uint32_t aggbase = UINT32_MAX; 10948 10949 /* 10950 * If we record anything, we always record the dtrace_rechdr_t. (And 10951 * we always record it first.) 
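* * As a sketch of the simplest case: a single 8-byte DIFEXPR record (e.g. trace(timestamp)) leaves dte_size at 24 -- the 12-byte header at offset 0, four bytes of alignment padding, and the record itself at dtrd_offset 16.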
10952 */ 10953 ecb->dte_size = sizeof (dtrace_rechdr_t); 10954 ecb->dte_alignment = sizeof (dtrace_epid_t); 10955 10956 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10957 dtrace_recdesc_t *rec = &act->dta_rec; 10958 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 10959 10960 ecb->dte_alignment = MAX(ecb->dte_alignment, 10961 rec->dtrd_alignment); 10962 10963 if (DTRACEACT_ISAGG(act->dta_kind)) { 10964 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10965 10966 ASSERT(rec->dtrd_size != 0); 10967 ASSERT(agg->dtag_first != NULL); 10968 ASSERT(act->dta_prev->dta_intuple); 10969 ASSERT(aggbase != UINT32_MAX); 10970 ASSERT(curneeded != UINT32_MAX); 10971 10972 agg->dtag_base = aggbase; 10973 10974 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 10975 rec->dtrd_offset = curneeded; 10976 curneeded += rec->dtrd_size; 10977 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 10978 10979 aggbase = UINT32_MAX; 10980 curneeded = UINT32_MAX; 10981 } else if (act->dta_intuple) { 10982 if (curneeded == UINT32_MAX) { 10983 /* 10984 * This is the first record in a tuple. Align 10985 * curneeded to be at offset 4 in an 8-byte 10986 * aligned block. 10987 */ 10988 ASSERT(act->dta_prev == NULL || 10989 !act->dta_prev->dta_intuple); 10990 ASSERT3U(aggbase, ==, UINT32_MAX); 10991 curneeded = P2PHASEUP(ecb->dte_size, 10992 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 10993 10994 aggbase = curneeded - sizeof (dtrace_aggid_t); 10995 ASSERT(IS_P2ALIGNED(aggbase, 10996 sizeof (uint64_t))); 10997 } 10998 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 10999 rec->dtrd_offset = curneeded; 11000 curneeded += rec->dtrd_size; 11001 } else { 11002 /* tuples must be followed by an aggregation */ 11003 ASSERT(act->dta_prev == NULL || 11004 !act->dta_prev->dta_intuple); 11005 11006 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 11007 rec->dtrd_alignment); 11008 rec->dtrd_offset = ecb->dte_size; 11009 ecb->dte_size += rec->dtrd_size; 11010 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 11011 } 11012 } 11013 11014 if ((act = ecb->dte_action) != NULL && 11015 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 11016 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 11017 /* 11018 * If the size is still sizeof (dtrace_rechdr_t), then all 11019 * actions store no data; set the size to 0. 
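* (This is the case, for example, for an ECB consisting solely of aggregating actions: their data lands in the aggregation buffer rather than the principal buffer, so there is nothing to record here.)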
11020 */ 11021 ecb->dte_size = 0; 11022 } 11023 11024 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 11025 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 11026 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 11027 ecb->dte_needed); 11028 } 11029 11030 static dtrace_action_t * 11031 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11032 { 11033 dtrace_aggregation_t *agg; 11034 size_t size = sizeof (uint64_t); 11035 int ntuple = desc->dtad_ntuple; 11036 dtrace_action_t *act; 11037 dtrace_recdesc_t *frec; 11038 dtrace_aggid_t aggid; 11039 dtrace_state_t *state = ecb->dte_state; 11040 11041 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 11042 agg->dtag_ecb = ecb; 11043 11044 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 11045 11046 switch (desc->dtad_kind) { 11047 case DTRACEAGG_MIN: 11048 agg->dtag_initial = INT64_MAX; 11049 agg->dtag_aggregate = dtrace_aggregate_min; 11050 break; 11051 11052 case DTRACEAGG_MAX: 11053 agg->dtag_initial = INT64_MIN; 11054 agg->dtag_aggregate = dtrace_aggregate_max; 11055 break; 11056 11057 case DTRACEAGG_COUNT: 11058 agg->dtag_aggregate = dtrace_aggregate_count; 11059 break; 11060 11061 case DTRACEAGG_QUANTIZE: 11062 agg->dtag_aggregate = dtrace_aggregate_quantize; 11063 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 11064 sizeof (uint64_t); 11065 break; 11066 11067 case DTRACEAGG_LQUANTIZE: { 11068 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 11069 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 11070 11071 agg->dtag_initial = desc->dtad_arg; 11072 agg->dtag_aggregate = dtrace_aggregate_lquantize; 11073 11074 if (step == 0 || levels == 0) 11075 goto err; 11076 11077 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 11078 break; 11079 } 11080 11081 case DTRACEAGG_LLQUANTIZE: { 11082 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 11083 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 11084 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 11085 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 11086 int64_t v; 11087 11088 agg->dtag_initial = desc->dtad_arg; 11089 agg->dtag_aggregate = dtrace_aggregate_llquantize; 11090 11091 if (factor < 2 || low >= high || nsteps < factor) 11092 goto err; 11093 11094 /* 11095 * Now check that the number of steps evenly divides a power 11096 * of the factor. (This assures both integer bucket size and 11097 * linearity within each magnitude.) 11098 */ 11099 for (v = factor; v < nsteps; v *= factor) 11100 continue; 11101 11102 if ((v % nsteps) || (nsteps % factor)) 11103 goto err; 11104 11105 size = (dtrace_aggregate_llquantize_bucket(factor, 11106 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 11107 break; 11108 } 11109 11110 case DTRACEAGG_AVG: 11111 agg->dtag_aggregate = dtrace_aggregate_avg; 11112 size = sizeof (uint64_t) * 2; 11113 break; 11114 11115 case DTRACEAGG_STDDEV: 11116 agg->dtag_aggregate = dtrace_aggregate_stddev; 11117 size = sizeof (uint64_t) * 4; 11118 break; 11119 11120 case DTRACEAGG_SUM: 11121 agg->dtag_aggregate = dtrace_aggregate_sum; 11122 break; 11123 11124 default: 11125 goto err; 11126 } 11127 11128 agg->dtag_action.dta_rec.dtrd_size = size; 11129 11130 if (ntuple == 0) 11131 goto err; 11132 11133 /* 11134 * We must make sure that we have enough actions for the n-tuple. 
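* For instance, for @a[pid, execname] = count(), the description carries dtad_ntuple == 2 and the walk below steps backward from the last action over the two key records before declaring the n-tuple complete.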
11135 */ 11136 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 11137 if (DTRACEACT_ISAGG(act->dta_kind)) 11138 break; 11139 11140 if (--ntuple == 0) { 11141 /* 11142 * This is the action with which our n-tuple begins. 11143 */ 11144 agg->dtag_first = act; 11145 goto success; 11146 } 11147 } 11148 11149 /* 11150 * This n-tuple is short by ntuple elements. Return failure. 11151 */ 11152 ASSERT(ntuple != 0); 11153 err: 11154 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11155 return (NULL); 11156 11157 success: 11158 /* 11159 * If the last action in the tuple has a size of zero, it's actually 11160 * an expression argument for the aggregating action. 11161 */ 11162 ASSERT(ecb->dte_action_last != NULL); 11163 act = ecb->dte_action_last; 11164 11165 if (act->dta_kind == DTRACEACT_DIFEXPR) { 11166 ASSERT(act->dta_difo != NULL); 11167 11168 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 11169 agg->dtag_hasarg = 1; 11170 } 11171 11172 /* 11173 * We need to allocate an id for this aggregation. 11174 */ 11175 #ifdef illumos 11176 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 11177 VM_BESTFIT | VM_SLEEP); 11178 #else 11179 aggid = alloc_unr(state->dts_aggid_arena); 11180 #endif 11181 11182 if (aggid - 1 >= state->dts_naggregations) { 11183 dtrace_aggregation_t **oaggs = state->dts_aggregations; 11184 dtrace_aggregation_t **aggs; 11185 int naggs = state->dts_naggregations << 1; 11186 int onaggs = state->dts_naggregations; 11187 11188 ASSERT(aggid == state->dts_naggregations + 1); 11189 11190 if (naggs == 0) { 11191 ASSERT(oaggs == NULL); 11192 naggs = 1; 11193 } 11194 11195 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 11196 11197 if (oaggs != NULL) { 11198 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 11199 kmem_free(oaggs, onaggs * sizeof (*aggs)); 11200 } 11201 11202 state->dts_aggregations = aggs; 11203 state->dts_naggregations = naggs; 11204 } 11205 11206 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 11207 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 11208 11209 frec = &agg->dtag_first->dta_rec; 11210 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 11211 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 11212 11213 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 11214 ASSERT(!act->dta_intuple); 11215 act->dta_intuple = 1; 11216 } 11217 11218 return (&agg->dtag_action); 11219 } 11220 11221 static void 11222 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 11223 { 11224 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 11225 dtrace_state_t *state = ecb->dte_state; 11226 dtrace_aggid_t aggid = agg->dtag_id; 11227 11228 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 11229 #ifdef illumos 11230 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 11231 #else 11232 free_unr(state->dts_aggid_arena, aggid); 11233 #endif 11234 11235 ASSERT(state->dts_aggregations[aggid - 1] == agg); 11236 state->dts_aggregations[aggid - 1] = NULL; 11237 11238 kmem_free(agg, sizeof (dtrace_aggregation_t)); 11239 } 11240 11241 static int 11242 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 11243 { 11244 dtrace_action_t *action, *last; 11245 dtrace_difo_t *dp = desc->dtad_difo; 11246 uint32_t size = 0, align = sizeof (uint8_t), mask; 11247 uint16_t format = 0; 11248 dtrace_recdesc_t *rec; 11249 dtrace_state_t *state = ecb->dte_state; 11250 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 11251 uint64_t arg = desc->dtad_arg; 11252 11253 
ASSERT(MUTEX_HELD(&dtrace_lock)); 11254 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 11255 11256 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 11257 /* 11258 * If this is an aggregating action, there must be neither 11259 * a speculate nor a commit on the action chain. 11260 */ 11261 dtrace_action_t *act; 11262 11263 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 11264 if (act->dta_kind == DTRACEACT_COMMIT) 11265 return (EINVAL); 11266 11267 if (act->dta_kind == DTRACEACT_SPECULATE) 11268 return (EINVAL); 11269 } 11270 11271 action = dtrace_ecb_aggregation_create(ecb, desc); 11272 11273 if (action == NULL) 11274 return (EINVAL); 11275 } else { 11276 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 11277 (desc->dtad_kind == DTRACEACT_DIFEXPR && 11278 dp != NULL && dp->dtdo_destructive)) { 11279 state->dts_destructive = 1; 11280 } 11281 11282 switch (desc->dtad_kind) { 11283 case DTRACEACT_PRINTF: 11284 case DTRACEACT_PRINTA: 11285 case DTRACEACT_SYSTEM: 11286 case DTRACEACT_FREOPEN: 11287 case DTRACEACT_DIFEXPR: 11288 /* 11289 * We know that our arg is a string -- turn it into a 11290 * format. 11291 */ 11292 if (arg == 0) { 11293 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 11294 desc->dtad_kind == DTRACEACT_DIFEXPR); 11295 format = 0; 11296 } else { 11297 ASSERT(arg != 0); 11298 #ifdef illumos 11299 ASSERT(arg > KERNELBASE); 11300 #endif 11301 format = dtrace_format_add(state, 11302 (char *)(uintptr_t)arg); 11303 } 11304 11305 /*FALLTHROUGH*/ 11306 case DTRACEACT_LIBACT: 11307 case DTRACEACT_TRACEMEM: 11308 case DTRACEACT_TRACEMEM_DYNSIZE: 11309 if (dp == NULL) 11310 return (EINVAL); 11311 11312 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 11313 break; 11314 11315 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 11316 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11317 return (EINVAL); 11318 11319 size = opt[DTRACEOPT_STRSIZE]; 11320 } 11321 11322 break; 11323 11324 case DTRACEACT_STACK: 11325 if ((nframes = arg) == 0) { 11326 nframes = opt[DTRACEOPT_STACKFRAMES]; 11327 ASSERT(nframes > 0); 11328 arg = nframes; 11329 } 11330 11331 size = nframes * sizeof (pc_t); 11332 break; 11333 11334 case DTRACEACT_JSTACK: 11335 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 11336 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 11337 11338 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 11339 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 11340 11341 arg = DTRACE_USTACK_ARG(nframes, strsize); 11342 11343 /*FALLTHROUGH*/ 11344 case DTRACEACT_USTACK: 11345 if (desc->dtad_kind != DTRACEACT_JSTACK && 11346 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 11347 strsize = DTRACE_USTACK_STRSIZE(arg); 11348 nframes = opt[DTRACEOPT_USTACKFRAMES]; 11349 ASSERT(nframes > 0); 11350 arg = DTRACE_USTACK_ARG(nframes, strsize); 11351 } 11352 11353 /* 11354 * Save a slot for the pid. 
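* (With the default ustackframes setting of 20 and no string space, for example, this works out to (20 + 1) * 8 = 168 bytes per record.)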
11355 */ 11356 size = (nframes + 1) * sizeof (uint64_t); 11357 size += DTRACE_USTACK_STRSIZE(arg); 11358 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 11359 11360 break; 11361 11362 case DTRACEACT_SYM: 11363 case DTRACEACT_MOD: 11364 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 11365 sizeof (uint64_t)) || 11366 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11367 return (EINVAL); 11368 break; 11369 11370 case DTRACEACT_USYM: 11371 case DTRACEACT_UMOD: 11372 case DTRACEACT_UADDR: 11373 if (dp == NULL || 11374 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 11375 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11376 return (EINVAL); 11377 11378 /* 11379 * We have a slot for the pid, plus a slot for the 11380 * argument. To keep things simple (aligned with 11381 * bitness-neutral sizing), we store each as a 64-bit 11382 * quantity. 11383 */ 11384 size = 2 * sizeof (uint64_t); 11385 break; 11386 11387 case DTRACEACT_STOP: 11388 case DTRACEACT_BREAKPOINT: 11389 case DTRACEACT_PANIC: 11390 break; 11391 11392 case DTRACEACT_CHILL: 11393 case DTRACEACT_DISCARD: 11394 case DTRACEACT_RAISE: 11395 if (dp == NULL) 11396 return (EINVAL); 11397 break; 11398 11399 case DTRACEACT_EXIT: 11400 if (dp == NULL || 11401 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 11402 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 11403 return (EINVAL); 11404 break; 11405 11406 case DTRACEACT_SPECULATE: 11407 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 11408 return (EINVAL); 11409 11410 if (dp == NULL) 11411 return (EINVAL); 11412 11413 state->dts_speculates = 1; 11414 break; 11415 11416 case DTRACEACT_PRINTM: 11417 size = dp->dtdo_rtype.dtdt_size; 11418 break; 11419 11420 case DTRACEACT_PRINTT: 11421 size = dp->dtdo_rtype.dtdt_size; 11422 break; 11423 11424 case DTRACEACT_COMMIT: { 11425 dtrace_action_t *act = ecb->dte_action; 11426 11427 for (; act != NULL; act = act->dta_next) { 11428 if (act->dta_kind == DTRACEACT_COMMIT) 11429 return (EINVAL); 11430 } 11431 11432 if (dp == NULL) 11433 return (EINVAL); 11434 break; 11435 } 11436 11437 default: 11438 return (EINVAL); 11439 } 11440 11441 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 11442 /* 11443 * If this is a data-storing action or a speculate, 11444 * we must be sure that there isn't a commit on the 11445 * action chain. 
11446 */ 11447 dtrace_action_t *act = ecb->dte_action; 11448 11449 for (; act != NULL; act = act->dta_next) { 11450 if (act->dta_kind == DTRACEACT_COMMIT) 11451 return (EINVAL); 11452 } 11453 } 11454 11455 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 11456 action->dta_rec.dtrd_size = size; 11457 } 11458 11459 action->dta_refcnt = 1; 11460 rec = &action->dta_rec; 11461 size = rec->dtrd_size; 11462 11463 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 11464 if (!(size & mask)) { 11465 align = mask + 1; 11466 break; 11467 } 11468 } 11469 11470 action->dta_kind = desc->dtad_kind; 11471 11472 if ((action->dta_difo = dp) != NULL) 11473 dtrace_difo_hold(dp); 11474 11475 rec->dtrd_action = action->dta_kind; 11476 rec->dtrd_arg = arg; 11477 rec->dtrd_uarg = desc->dtad_uarg; 11478 rec->dtrd_alignment = (uint16_t)align; 11479 rec->dtrd_format = format; 11480 11481 if ((last = ecb->dte_action_last) != NULL) { 11482 ASSERT(ecb->dte_action != NULL); 11483 action->dta_prev = last; 11484 last->dta_next = action; 11485 } else { 11486 ASSERT(ecb->dte_action == NULL); 11487 ecb->dte_action = action; 11488 } 11489 11490 ecb->dte_action_last = action; 11491 11492 return (0); 11493 } 11494 11495 static void 11496 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 11497 { 11498 dtrace_action_t *act = ecb->dte_action, *next; 11499 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 11500 dtrace_difo_t *dp; 11501 uint16_t format; 11502 11503 if (act != NULL && act->dta_refcnt > 1) { 11504 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 11505 act->dta_refcnt--; 11506 } else { 11507 for (; act != NULL; act = next) { 11508 next = act->dta_next; 11509 ASSERT(next != NULL || act == ecb->dte_action_last); 11510 ASSERT(act->dta_refcnt == 1); 11511 11512 if ((format = act->dta_rec.dtrd_format) != 0) 11513 dtrace_format_remove(ecb->dte_state, format); 11514 11515 if ((dp = act->dta_difo) != NULL) 11516 dtrace_difo_release(dp, vstate); 11517 11518 if (DTRACEACT_ISAGG(act->dta_kind)) { 11519 dtrace_ecb_aggregation_destroy(ecb, act); 11520 } else { 11521 kmem_free(act, sizeof (dtrace_action_t)); 11522 } 11523 } 11524 } 11525 11526 ecb->dte_action = NULL; 11527 ecb->dte_action_last = NULL; 11528 ecb->dte_size = 0; 11529 } 11530 11531 static void 11532 dtrace_ecb_disable(dtrace_ecb_t *ecb) 11533 { 11534 /* 11535 * We disable the ECB by removing it from its probe. 11536 */ 11537 dtrace_ecb_t *pecb, *prev = NULL; 11538 dtrace_probe_t *probe = ecb->dte_probe; 11539 11540 ASSERT(MUTEX_HELD(&dtrace_lock)); 11541 11542 if (probe == NULL) { 11543 /* 11544 * This is the NULL probe; there is nothing to disable. 11545 */ 11546 return; 11547 } 11548 11549 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 11550 if (pecb == ecb) 11551 break; 11552 prev = pecb; 11553 } 11554 11555 ASSERT(pecb != NULL); 11556 11557 if (prev == NULL) { 11558 probe->dtpr_ecb = ecb->dte_next; 11559 } else { 11560 prev->dte_next = ecb->dte_next; 11561 } 11562 11563 if (ecb == probe->dtpr_ecb_last) { 11564 ASSERT(ecb->dte_next == NULL); 11565 probe->dtpr_ecb_last = prev; 11566 } 11567 11568 /* 11569 * The ECB has been disconnected from the probe; now sync to assure 11570 * that all CPUs have seen the change before returning. 11571 */ 11572 dtrace_sync(); 11573 11574 if (probe->dtpr_ecb == NULL) { 11575 /* 11576 * That was the last ECB on the probe; clear the predicate 11577 * cache ID for the probe, disable it and sync one more time 11578 * to assure that we'll never hit it again. 
11579 */ 11580 dtrace_provider_t *prov = probe->dtpr_provider; 11581 11582 ASSERT(ecb->dte_next == NULL); 11583 ASSERT(probe->dtpr_ecb_last == NULL); 11584 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 11585 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 11586 probe->dtpr_id, probe->dtpr_arg); 11587 dtrace_sync(); 11588 } else { 11589 /* 11590 * There is at least one ECB remaining on the probe. If there 11591 * is _exactly_ one, set the probe's predicate cache ID to be 11592 * the predicate cache ID of the remaining ECB. 11593 */ 11594 ASSERT(probe->dtpr_ecb_last != NULL); 11595 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 11596 11597 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 11598 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 11599 11600 ASSERT(probe->dtpr_ecb->dte_next == NULL); 11601 11602 if (p != NULL) 11603 probe->dtpr_predcache = p->dtp_cacheid; 11604 } 11605 11606 ecb->dte_next = NULL; 11607 } 11608 } 11609 11610 static void 11611 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 11612 { 11613 dtrace_state_t *state = ecb->dte_state; 11614 dtrace_vstate_t *vstate = &state->dts_vstate; 11615 dtrace_predicate_t *pred; 11616 dtrace_epid_t epid = ecb->dte_epid; 11617 11618 ASSERT(MUTEX_HELD(&dtrace_lock)); 11619 ASSERT(ecb->dte_next == NULL); 11620 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 11621 11622 if ((pred = ecb->dte_predicate) != NULL) 11623 dtrace_predicate_release(pred, vstate); 11624 11625 dtrace_ecb_action_remove(ecb); 11626 11627 ASSERT(state->dts_ecbs[epid - 1] == ecb); 11628 state->dts_ecbs[epid - 1] = NULL; 11629 11630 kmem_free(ecb, sizeof (dtrace_ecb_t)); 11631 } 11632 11633 static dtrace_ecb_t * 11634 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 11635 dtrace_enabling_t *enab) 11636 { 11637 dtrace_ecb_t *ecb; 11638 dtrace_predicate_t *pred; 11639 dtrace_actdesc_t *act; 11640 dtrace_provider_t *prov; 11641 dtrace_ecbdesc_t *desc = enab->dten_current; 11642 11643 ASSERT(MUTEX_HELD(&dtrace_lock)); 11644 ASSERT(state != NULL); 11645 11646 ecb = dtrace_ecb_add(state, probe); 11647 ecb->dte_uarg = desc->dted_uarg; 11648 11649 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 11650 dtrace_predicate_hold(pred); 11651 ecb->dte_predicate = pred; 11652 } 11653 11654 if (probe != NULL) { 11655 /* 11656 * If the provider shows more leg than the consumer is old 11657 * enough to see, we need to enable the appropriate implicit 11658 * predicate bits to prevent the ecb from activating at 11659 * revealing times. 11660 * 11661 * Providers specifying DTRACE_PRIV_USER at register time 11662 * are stating that they need the /proc-style privilege 11663 * model to be enforced, and this is what DTRACE_COND_OWNER 11664 * and DTRACE_COND_ZONEOWNER will then do at probe time. 11665 */ 11666 prov = probe->dtpr_provider; 11667 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 11668 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11669 ecb->dte_cond |= DTRACE_COND_OWNER; 11670 11671 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 11672 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 11673 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 11674 11675 /* 11676 * If the provider shows us kernel innards and the user 11677 * is lacking sufficient privilege, enable the 11678 * DTRACE_COND_USERMODE implicit predicate. 
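* (Under DTRACE_COND_USERMODE the probe still fires, but this ECB is skipped unless the firing thread was executing in user mode at the time.)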
11679 */ 11680 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 11681 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 11682 ecb->dte_cond |= DTRACE_COND_USERMODE; 11683 } 11684 11685 if (dtrace_ecb_create_cache != NULL) { 11686 /* 11687 * If we have a cached ecb, we'll use its action list instead 11688 * of creating our own (saving both time and space). 11689 */ 11690 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 11691 dtrace_action_t *act = cached->dte_action; 11692 11693 if (act != NULL) { 11694 ASSERT(act->dta_refcnt > 0); 11695 act->dta_refcnt++; 11696 ecb->dte_action = act; 11697 ecb->dte_action_last = cached->dte_action_last; 11698 ecb->dte_needed = cached->dte_needed; 11699 ecb->dte_size = cached->dte_size; 11700 ecb->dte_alignment = cached->dte_alignment; 11701 } 11702 11703 return (ecb); 11704 } 11705 11706 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 11707 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 11708 dtrace_ecb_destroy(ecb); 11709 return (NULL); 11710 } 11711 } 11712 11713 dtrace_ecb_resize(ecb); 11714 11715 return (dtrace_ecb_create_cache = ecb); 11716 } 11717 11718 static int 11719 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 11720 { 11721 dtrace_ecb_t *ecb; 11722 dtrace_enabling_t *enab = arg; 11723 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 11724 11725 ASSERT(state != NULL); 11726 11727 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 11728 /* 11729 * This probe was created in a generation for which this 11730 * enabling has previously created ECBs; we don't want to 11731 * enable it again, so just kick out. 11732 */ 11733 return (DTRACE_MATCH_NEXT); 11734 } 11735 11736 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 11737 return (DTRACE_MATCH_DONE); 11738 11739 dtrace_ecb_enable(ecb); 11740 return (DTRACE_MATCH_NEXT); 11741 } 11742 11743 static dtrace_ecb_t * 11744 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 11745 { 11746 dtrace_ecb_t *ecb; 11747 11748 ASSERT(MUTEX_HELD(&dtrace_lock)); 11749 11750 if (id == 0 || id > state->dts_necbs) 11751 return (NULL); 11752 11753 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 11754 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 11755 11756 return (state->dts_ecbs[id - 1]); 11757 } 11758 11759 static dtrace_aggregation_t * 11760 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 11761 { 11762 dtrace_aggregation_t *agg; 11763 11764 ASSERT(MUTEX_HELD(&dtrace_lock)); 11765 11766 if (id == 0 || id > state->dts_naggregations) 11767 return (NULL); 11768 11769 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 11770 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 11771 agg->dtag_id == id); 11772 11773 return (state->dts_aggregations[id - 1]); 11774 } 11775 11776 /* 11777 * DTrace Buffer Functions 11778 * 11779 * The following functions manipulate DTrace buffers. Most of these functions 11780 * are called in the context of establishing or processing consumer state; 11781 * exceptions are explicitly noted. 11782 */ 11783 11784 /* 11785 * Note: called from cross call context. This function switches the two 11786 * buffers on a given CPU. The atomicity of this operation is assured by 11787 * disabling interrupts while the actual switch takes place; the disabling of 11788 * interrupts serializes the execution with any execution of dtrace_probe() on 11789 * the same CPU. 
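* (Consumers reach this path via the DTRACEIOC_BUFSNAP ioctl, which cross-calls onto the target CPU to perform the switch and then copies the newly-inactive dtb_xamot buffer out to user-level.)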
11790 */ 11791 static void 11792 dtrace_buffer_switch(dtrace_buffer_t *buf) 11793 { 11794 caddr_t tomax = buf->dtb_tomax; 11795 caddr_t xamot = buf->dtb_xamot; 11796 dtrace_icookie_t cookie; 11797 hrtime_t now; 11798 11799 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11800 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 11801 11802 cookie = dtrace_interrupt_disable(); 11803 now = dtrace_gethrtime(); 11804 buf->dtb_tomax = xamot; 11805 buf->dtb_xamot = tomax; 11806 buf->dtb_xamot_drops = buf->dtb_drops; 11807 buf->dtb_xamot_offset = buf->dtb_offset; 11808 buf->dtb_xamot_errors = buf->dtb_errors; 11809 buf->dtb_xamot_flags = buf->dtb_flags; 11810 buf->dtb_offset = 0; 11811 buf->dtb_drops = 0; 11812 buf->dtb_errors = 0; 11813 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 11814 buf->dtb_interval = now - buf->dtb_switched; 11815 buf->dtb_switched = now; 11816 dtrace_interrupt_enable(cookie); 11817 } 11818 11819 /* 11820 * Note: called from cross call context. This function activates a buffer 11821 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 11822 * is guaranteed by the disabling of interrupts. 11823 */ 11824 static void 11825 dtrace_buffer_activate(dtrace_state_t *state) 11826 { 11827 dtrace_buffer_t *buf; 11828 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 11829 11830 buf = &state->dts_buffer[curcpu]; 11831 11832 if (buf->dtb_tomax != NULL) { 11833 /* 11834 * We might like to assert that the buffer is marked inactive, 11835 * but this isn't necessarily true: the buffer for the CPU 11836 * that processes the BEGIN probe has its buffer activated 11837 * manually. In this case, we take the (harmless) action of 11838 * re-clearing the INACTIVE bit. 11839 */ 11840 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 11841 } 11842 11843 dtrace_interrupt_enable(cookie); 11844 } 11845 11846 static int 11847 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 11848 processorid_t cpu, int *factor) 11849 { 11850 #ifdef illumos 11851 cpu_t *cp; 11852 #endif 11853 dtrace_buffer_t *buf; 11854 int allocated = 0, desired = 0; 11855 11856 #ifdef illumos 11857 ASSERT(MUTEX_HELD(&cpu_lock)); 11858 ASSERT(MUTEX_HELD(&dtrace_lock)); 11859 11860 *factor = 1; 11861 11862 if (size > dtrace_nonroot_maxsize && 11863 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 11864 return (EFBIG); 11865 11866 cp = cpu_list; 11867 11868 do { 11869 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 11870 continue; 11871 11872 buf = &bufs[cp->cpu_id]; 11873 11874 /* 11875 * If there is already a buffer allocated for this CPU, it 11876 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 11877 */ 11878 if (buf->dtb_tomax != NULL) { 11879 ASSERT(buf->dtb_size == size); 11880 continue; 11881 } 11882 11883 ASSERT(buf->dtb_xamot == NULL); 11884 11885 if ((buf->dtb_tomax = kmem_zalloc(size, 11886 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11887 goto err; 11888 11889 buf->dtb_size = size; 11890 buf->dtb_flags = flags; 11891 buf->dtb_offset = 0; 11892 buf->dtb_drops = 0; 11893 11894 if (flags & DTRACEBUF_NOSWITCH) 11895 continue; 11896 11897 if ((buf->dtb_xamot = kmem_zalloc(size, 11898 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11899 goto err; 11900 } while ((cp = cp->cpu_next) != cpu_list); 11901 11902 return (0); 11903 11904 err: 11905 cp = cpu_list; 11906 11907 do { 11908 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 11909 continue; 11910 11911 buf = &bufs[cp->cpu_id]; 11912 desired += 2; 11913 11914 if (buf->dtb_xamot != NULL) { 11915 ASSERT(buf->dtb_tomax != NULL); 11916 ASSERT(buf->dtb_size == size); 11917 kmem_free(buf->dtb_xamot, size); 11918 allocated++; 11919 } 11920 11921 if (buf->dtb_tomax != NULL) { 11922 ASSERT(buf->dtb_size == size); 11923 kmem_free(buf->dtb_tomax, size); 11924 allocated++; 11925 } 11926 11927 buf->dtb_tomax = NULL; 11928 buf->dtb_xamot = NULL; 11929 buf->dtb_size = 0; 11930 } while ((cp = cp->cpu_next) != cpu_list); 11931 #else 11932 int i; 11933 11934 *factor = 1; 11935 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ 11936 defined(__mips__) || defined(__powerpc__) 11937 /* 11938 * FreeBSD isn't good at limiting the amount of memory we 11939 * ask to malloc, so let's place a limit here before trying 11940 * to do something that might well end in tears at bedtime. 11941 */ 11942 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 11943 return (ENOMEM); 11944 #endif 11945 11946 ASSERT(MUTEX_HELD(&dtrace_lock)); 11947 CPU_FOREACH(i) { 11948 if (cpu != DTRACE_CPUALL && cpu != i) 11949 continue; 11950 11951 buf = &bufs[i]; 11952 11953 /* 11954 * If there is already a buffer allocated for this CPU, it 11955 * is only possible that this is a DR event. In this case, 11956 * the buffer size must match our specified size. 11957 */ 11958 if (buf->dtb_tomax != NULL) { 11959 ASSERT(buf->dtb_size == size); 11960 continue; 11961 } 11962 11963 ASSERT(buf->dtb_xamot == NULL); 11964 11965 if ((buf->dtb_tomax = kmem_zalloc(size, 11966 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11967 goto err; 11968 11969 buf->dtb_size = size; 11970 buf->dtb_flags = flags; 11971 buf->dtb_offset = 0; 11972 buf->dtb_drops = 0; 11973 11974 if (flags & DTRACEBUF_NOSWITCH) 11975 continue; 11976 11977 if ((buf->dtb_xamot = kmem_zalloc(size, 11978 KM_NOSLEEP | KM_NORMALPRI)) == NULL) 11979 goto err; 11980 } 11981 11982 return (0); 11983 11984 err: 11985 /* 11986 * Error allocating memory, so free the buffers that were 11987 * allocated before the failed allocation. 11988 */ 11989 CPU_FOREACH(i) { 11990 if (cpu != DTRACE_CPUALL && cpu != i) 11991 continue; 11992 11993 buf = &bufs[i]; 11994 desired += 2; 11995 11996 if (buf->dtb_xamot != NULL) { 11997 ASSERT(buf->dtb_tomax != NULL); 11998 ASSERT(buf->dtb_size == size); 11999 kmem_free(buf->dtb_xamot, size); 12000 allocated++; 12001 } 12002 12003 if (buf->dtb_tomax != NULL) { 12004 ASSERT(buf->dtb_size == size); 12005 kmem_free(buf->dtb_tomax, size); 12006 allocated++; 12007 } 12008 12009 buf->dtb_tomax = NULL; 12010 buf->dtb_xamot = NULL; 12011 buf->dtb_size = 0; 12012 12013 } 12014 #endif 12015 *factor = desired / (allocated > 0 ? 
allocated : 1); 12016 12017 return (ENOMEM); 12018 } 12019 12020 /* 12021 * Note: called from probe context. This function just increments the drop 12022 * count on a buffer. It has been made a function to allow for the 12023 * possibility of understanding the source of mysterious drop counts. (A 12024 * problem for which one may be particularly disappointed that DTrace cannot 12025 * be used to understand DTrace.) 12026 */ 12027 static void 12028 dtrace_buffer_drop(dtrace_buffer_t *buf) 12029 { 12030 buf->dtb_drops++; 12031 } 12032 12033 /* 12034 * Note: called from probe context. This function is called to reserve space 12035 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 12036 * mstate. Returns the new offset in the buffer, or a negative value if an 12037 * error has occurred. 12038 */ 12039 static intptr_t 12040 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 12041 dtrace_state_t *state, dtrace_mstate_t *mstate) 12042 { 12043 intptr_t offs = buf->dtb_offset, soffs; 12044 intptr_t woffs; 12045 caddr_t tomax; 12046 size_t total; 12047 12048 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 12049 return (-1); 12050 12051 if ((tomax = buf->dtb_tomax) == NULL) { 12052 dtrace_buffer_drop(buf); 12053 return (-1); 12054 } 12055 12056 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 12057 while (offs & (align - 1)) { 12058 /* 12059 * Assert that our alignment is off by a number which 12060 * is itself sizeof (uint32_t) aligned. 12061 */ 12062 ASSERT(!((align - (offs & (align - 1))) & 12063 (sizeof (uint32_t) - 1))); 12064 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12065 offs += sizeof (uint32_t); 12066 } 12067 12068 if ((soffs = offs + needed) > buf->dtb_size) { 12069 dtrace_buffer_drop(buf); 12070 return (-1); 12071 } 12072 12073 if (mstate == NULL) 12074 return (offs); 12075 12076 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 12077 mstate->dtms_scratch_size = buf->dtb_size - soffs; 12078 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12079 12080 return (offs); 12081 } 12082 12083 if (buf->dtb_flags & DTRACEBUF_FILL) { 12084 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 12085 (buf->dtb_flags & DTRACEBUF_FULL)) 12086 return (-1); 12087 goto out; 12088 } 12089 12090 total = needed + (offs & (align - 1)); 12091 12092 /* 12093 * For a ring buffer, life is quite a bit more complicated. Before 12094 * we can store any padding, we need to adjust our wrapping offset. 12095 * (If we've never before wrapped or we're not about to, no adjustment 12096 * is required.) 12097 */ 12098 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 12099 offs + total > buf->dtb_size) { 12100 woffs = buf->dtb_xamot_offset; 12101 12102 if (offs + total > buf->dtb_size) { 12103 /* 12104 * We can't fit in the end of the buffer. First, a 12105 * sanity check that we can fit in the buffer at all. 12106 */ 12107 if (total > buf->dtb_size) { 12108 dtrace_buffer_drop(buf); 12109 return (-1); 12110 } 12111 12112 /* 12113 * We're going to be storing at the top of the buffer, 12114 * so now we need to deal with the wrapped offset. We 12115 * only reset our wrapped offset to 0 if it is 12116 * currently greater than the current offset. If it 12117 * is less than the current offset, it is because a 12118 * previous allocation induced a wrap -- but the 12119 * allocation didn't subsequently take the space due 12120 * to an error or false predicate evaluation. 
In this 12121 * case, we'll just leave the wrapped offset alone: if 12122 * the wrapped offset hasn't been advanced far enough 12123 * for this allocation, it will be adjusted in the 12124 * lower loop. 12125 */ 12126 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 12127 if (woffs >= offs) 12128 woffs = 0; 12129 } else { 12130 woffs = 0; 12131 } 12132 12133 /* 12134 * Now we know that we're going to be storing to the 12135 * top of the buffer and that there is room for us 12136 * there. We need to clear the buffer from the current 12137 * offset to the end (there may be old gunk there). 12138 */ 12139 while (offs < buf->dtb_size) 12140 tomax[offs++] = 0; 12141 12142 /* 12143 * We need to set our offset to zero. And because we 12144 * are wrapping, we need to set the bit indicating as 12145 * much. We can also adjust our needed space back 12146 * down to the space required by the ECB -- we know 12147 * that the top of the buffer is aligned. 12148 */ 12149 offs = 0; 12150 total = needed; 12151 buf->dtb_flags |= DTRACEBUF_WRAPPED; 12152 } else { 12153 /* 12154 * There is room for us in the buffer, so we simply 12155 * need to check the wrapped offset. 12156 */ 12157 if (woffs < offs) { 12158 /* 12159 * The wrapped offset is less than the offset. 12160 * This can happen if we allocated buffer space 12161 * that induced a wrap, but then we didn't 12162 * subsequently take the space due to an error 12163 * or false predicate evaluation. This is 12164 * okay; we know that _this_ allocation isn't 12165 * going to induce a wrap. We still can't 12166 * reset the wrapped offset to be zero, 12167 * however: the space may have been trashed in 12168 * the previous failed probe attempt. But at 12169 * least the wrapped offset doesn't need to 12170 * be adjusted at all... 12171 */ 12172 goto out; 12173 } 12174 } 12175 12176 while (offs + total > woffs) { 12177 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 12178 size_t size; 12179 12180 if (epid == DTRACE_EPIDNONE) { 12181 size = sizeof (uint32_t); 12182 } else { 12183 ASSERT3U(epid, <=, state->dts_necbs); 12184 ASSERT(state->dts_ecbs[epid - 1] != NULL); 12185 12186 size = state->dts_ecbs[epid - 1]->dte_size; 12187 } 12188 12189 ASSERT(woffs + size <= buf->dtb_size); 12190 ASSERT(size != 0); 12191 12192 if (woffs + size == buf->dtb_size) { 12193 /* 12194 * We've reached the end of the buffer; we want 12195 * to set the wrapped offset to 0 and break 12196 * out. However, if the offs is 0, then we're 12197 * in a strange edge-condition: the amount of 12198 * space that we want to reserve plus the size 12199 * of the record that we're overwriting is 12200 * greater than the size of the buffer. This 12201 * is problematic because if we reserve the 12202 * space but subsequently don't consume it (due 12203 * to a failed predicate or error) the wrapped 12204 * offset will be 0 -- yet the EPID at offset 0 12205 * will not be committed. This situation is 12206 * relatively easy to deal with: if we're in 12207 * this case, the buffer is indistinguishable 12208 * from one that hasn't wrapped; we need only 12209 * finish the job by clearing the wrapped bit, 12210 * explicitly setting the offset to be 0, and 12211 * zero'ing out the old data in the buffer. 
12212 */ 12213 if (offs == 0) { 12214 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 12215 buf->dtb_offset = 0; 12216 woffs = total; 12217 12218 while (woffs < buf->dtb_size) 12219 tomax[woffs++] = 0; 12220 } 12221 12222 woffs = 0; 12223 break; 12224 } 12225 12226 woffs += size; 12227 } 12228 12229 /* 12230 * We have a wrapped offset. It may be that the wrapped offset 12231 * has become zero -- that's okay. 12232 */ 12233 buf->dtb_xamot_offset = woffs; 12234 } 12235 12236 out: 12237 /* 12238 * Now we can plow the buffer with any necessary padding. 12239 */ 12240 while (offs & (align - 1)) { 12241 /* 12242 * Assert that our alignment is off by a number which 12243 * is itself sizeof (uint32_t) aligned. 12244 */ 12245 ASSERT(!((align - (offs & (align - 1))) & 12246 (sizeof (uint32_t) - 1))); 12247 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 12248 offs += sizeof (uint32_t); 12249 } 12250 12251 if (buf->dtb_flags & DTRACEBUF_FILL) { 12252 if (offs + needed > buf->dtb_size - state->dts_reserve) { 12253 buf->dtb_flags |= DTRACEBUF_FULL; 12254 return (-1); 12255 } 12256 } 12257 12258 if (mstate == NULL) 12259 return (offs); 12260 12261 /* 12262 * For ring buffers and fill buffers, the scratch space is always 12263 * the inactive buffer. 12264 */ 12265 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 12266 mstate->dtms_scratch_size = buf->dtb_size; 12267 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 12268 12269 return (offs); 12270 } 12271 12272 static void 12273 dtrace_buffer_polish(dtrace_buffer_t *buf) 12274 { 12275 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 12276 ASSERT(MUTEX_HELD(&dtrace_lock)); 12277 12278 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 12279 return; 12280 12281 /* 12282 * We need to polish the ring buffer. There are three cases: 12283 * 12284 * - The first (and presumably most common) is that there is no gap 12285 * between the buffer offset and the wrapped offset. In this case, 12286 * there is nothing in the buffer that isn't valid data; we can 12287 * mark the buffer as polished and return. 12288 * 12289 * - The second (less common than the first but still more common 12290 * than the third) is that there is a gap between the buffer offset 12291 * and the wrapped offset, and the wrapped offset is larger than the 12292 * buffer offset. This can happen because of an alignment issue, or 12293 * can happen because of a call to dtrace_buffer_reserve() that 12294 * didn't subsequently consume the buffer space. In this case, 12295 * we need to zero the data from the buffer offset to the wrapped 12296 * offset. 12297 * 12298 * - The third (and least common) is that there is a gap between the 12299 * buffer offset and the wrapped offset, but the wrapped offset is 12300 * _less_ than the buffer offset. This can only happen because a 12301 * call to dtrace_buffer_reserve() induced a wrap, but the space 12302 * was not subsequently consumed. In this case, we need to zero the 12303 * space from the offset to the end of the buffer _and_ from the 12304 * top of the buffer to the wrapped offset. 
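* * Schematically, with "o" denoting dtb_offset, "w" denoting dtb_xamot_offset and "x" denoting zeroed space: * * case two: [ valid | o xxxxx w | valid ] * case three: [ xxxxx w | valid | o xxxxx ]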
12305 */ 12306 if (buf->dtb_offset < buf->dtb_xamot_offset) { 12307 bzero(buf->dtb_tomax + buf->dtb_offset, 12308 buf->dtb_xamot_offset - buf->dtb_offset); 12309 } 12310 12311 if (buf->dtb_offset > buf->dtb_xamot_offset) { 12312 bzero(buf->dtb_tomax + buf->dtb_offset, 12313 buf->dtb_size - buf->dtb_offset); 12314 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 12315 } 12316 } 12317 12318 /* 12319 * This routine determines if data generated at the specified time has likely 12320 * been entirely consumed at user-level. This routine is called to determine 12321 * if an ECB on a defunct probe (but for an active enabling) can be safely 12322 * disabled and destroyed. 12323 */ 12324 static int 12325 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 12326 { 12327 int i; 12328 12329 for (i = 0; i < NCPU; i++) { 12330 dtrace_buffer_t *buf = &bufs[i]; 12331 12332 if (buf->dtb_size == 0) 12333 continue; 12334 12335 if (buf->dtb_flags & DTRACEBUF_RING) 12336 return (0); 12337 12338 if (!buf->dtb_switched && buf->dtb_offset != 0) 12339 return (0); 12340 12341 if (buf->dtb_switched - buf->dtb_interval < when) 12342 return (0); 12343 } 12344 12345 return (1); 12346 } 12347 12348 static void 12349 dtrace_buffer_free(dtrace_buffer_t *bufs) 12350 { 12351 int i; 12352 12353 for (i = 0; i < NCPU; i++) { 12354 dtrace_buffer_t *buf = &bufs[i]; 12355 12356 if (buf->dtb_tomax == NULL) { 12357 ASSERT(buf->dtb_xamot == NULL); 12358 ASSERT(buf->dtb_size == 0); 12359 continue; 12360 } 12361 12362 if (buf->dtb_xamot != NULL) { 12363 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 12364 kmem_free(buf->dtb_xamot, buf->dtb_size); 12365 } 12366 12367 kmem_free(buf->dtb_tomax, buf->dtb_size); 12368 buf->dtb_size = 0; 12369 buf->dtb_tomax = NULL; 12370 buf->dtb_xamot = NULL; 12371 } 12372 } 12373 12374 /* 12375 * DTrace Enabling Functions 12376 */ 12377 static dtrace_enabling_t * 12378 dtrace_enabling_create(dtrace_vstate_t *vstate) 12379 { 12380 dtrace_enabling_t *enab; 12381 12382 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 12383 enab->dten_vstate = vstate; 12384 12385 return (enab); 12386 } 12387 12388 static void 12389 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 12390 { 12391 dtrace_ecbdesc_t **ndesc; 12392 size_t osize, nsize; 12393 12394 /* 12395 * We can't add to enablings after we've enabled them, or after we've 12396 * retained them. 
12397 */ 12398 ASSERT(enab->dten_probegen == 0); 12399 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12400 12401 if (enab->dten_ndesc < enab->dten_maxdesc) { 12402 enab->dten_desc[enab->dten_ndesc++] = ecb; 12403 return; 12404 } 12405 12406 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12407 12408 if (enab->dten_maxdesc == 0) { 12409 enab->dten_maxdesc = 1; 12410 } else { 12411 enab->dten_maxdesc <<= 1; 12412 } 12413 12414 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 12415 12416 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 12417 ndesc = kmem_zalloc(nsize, KM_SLEEP); 12418 bcopy(enab->dten_desc, ndesc, osize); 12419 if (enab->dten_desc != NULL) 12420 kmem_free(enab->dten_desc, osize); 12421 12422 enab->dten_desc = ndesc; 12423 enab->dten_desc[enab->dten_ndesc++] = ecb; 12424 } 12425 12426 static void 12427 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 12428 dtrace_probedesc_t *pd) 12429 { 12430 dtrace_ecbdesc_t *new; 12431 dtrace_predicate_t *pred; 12432 dtrace_actdesc_t *act; 12433 12434 /* 12435 * We're going to create a new ECB description that matches the 12436 * specified ECB in every way, but has the specified probe description. 12437 */ 12438 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12439 12440 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 12441 dtrace_predicate_hold(pred); 12442 12443 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 12444 dtrace_actdesc_hold(act); 12445 12446 new->dted_action = ecb->dted_action; 12447 new->dted_pred = ecb->dted_pred; 12448 new->dted_probe = *pd; 12449 new->dted_uarg = ecb->dted_uarg; 12450 12451 dtrace_enabling_add(enab, new); 12452 } 12453 12454 static void 12455 dtrace_enabling_dump(dtrace_enabling_t *enab) 12456 { 12457 int i; 12458 12459 for (i = 0; i < enab->dten_ndesc; i++) { 12460 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 12461 12462 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 12463 desc->dtpd_provider, desc->dtpd_mod, 12464 desc->dtpd_func, desc->dtpd_name); 12465 } 12466 } 12467 12468 static void 12469 dtrace_enabling_destroy(dtrace_enabling_t *enab) 12470 { 12471 int i; 12472 dtrace_ecbdesc_t *ep; 12473 dtrace_vstate_t *vstate = enab->dten_vstate; 12474 12475 ASSERT(MUTEX_HELD(&dtrace_lock)); 12476 12477 for (i = 0; i < enab->dten_ndesc; i++) { 12478 dtrace_actdesc_t *act, *next; 12479 dtrace_predicate_t *pred; 12480 12481 ep = enab->dten_desc[i]; 12482 12483 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 12484 dtrace_predicate_release(pred, vstate); 12485 12486 for (act = ep->dted_action; act != NULL; act = next) { 12487 next = act->dtad_next; 12488 dtrace_actdesc_release(act, vstate); 12489 } 12490 12491 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12492 } 12493 12494 if (enab->dten_desc != NULL) 12495 kmem_free(enab->dten_desc, 12496 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 12497 12498 /* 12499 * If this was a retained enabling, decrement the dts_nretained count 12500 * and take it off of the dtrace_retained list. 
12501 */ 12502 if (enab->dten_prev != NULL || enab->dten_next != NULL || 12503 dtrace_retained == enab) { 12504 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12505 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 12506 enab->dten_vstate->dtvs_state->dts_nretained--; 12507 dtrace_retained_gen++; 12508 } 12509 12510 if (enab->dten_prev == NULL) { 12511 if (dtrace_retained == enab) { 12512 dtrace_retained = enab->dten_next; 12513 12514 if (dtrace_retained != NULL) 12515 dtrace_retained->dten_prev = NULL; 12516 } 12517 } else { 12518 ASSERT(enab != dtrace_retained); 12519 ASSERT(dtrace_retained != NULL); 12520 enab->dten_prev->dten_next = enab->dten_next; 12521 } 12522 12523 if (enab->dten_next != NULL) { 12524 ASSERT(dtrace_retained != NULL); 12525 enab->dten_next->dten_prev = enab->dten_prev; 12526 } 12527 12528 kmem_free(enab, sizeof (dtrace_enabling_t)); 12529 } 12530 12531 static int 12532 dtrace_enabling_retain(dtrace_enabling_t *enab) 12533 { 12534 dtrace_state_t *state; 12535 12536 ASSERT(MUTEX_HELD(&dtrace_lock)); 12537 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 12538 ASSERT(enab->dten_vstate != NULL); 12539 12540 state = enab->dten_vstate->dtvs_state; 12541 ASSERT(state != NULL); 12542 12543 /* 12544 * We only allow each state to retain dtrace_retain_max enablings. 12545 */ 12546 if (state->dts_nretained >= dtrace_retain_max) 12547 return (ENOSPC); 12548 12549 state->dts_nretained++; 12550 dtrace_retained_gen++; 12551 12552 if (dtrace_retained == NULL) { 12553 dtrace_retained = enab; 12554 return (0); 12555 } 12556 12557 enab->dten_next = dtrace_retained; 12558 dtrace_retained->dten_prev = enab; 12559 dtrace_retained = enab; 12560 12561 return (0); 12562 } 12563 12564 static int 12565 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 12566 dtrace_probedesc_t *create) 12567 { 12568 dtrace_enabling_t *new, *enab; 12569 int found = 0, err = ENOENT; 12570 12571 ASSERT(MUTEX_HELD(&dtrace_lock)); 12572 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 12573 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 12574 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 12575 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 12576 12577 new = dtrace_enabling_create(&state->dts_vstate); 12578 12579 /* 12580 * Iterate over all retained enablings, looking for enablings that 12581 * match the specified state. 12582 */ 12583 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 12584 int i; 12585 12586 /* 12587 * dtvs_state can only be NULL for helper enablings -- and 12588 * helper enablings can't be retained. 12589 */ 12590 ASSERT(enab->dten_vstate->dtvs_state != NULL); 12591 12592 if (enab->dten_vstate->dtvs_state != state) 12593 continue; 12594 12595 /* 12596 * Now iterate over each probe description; we're looking for 12597 * an exact match to the specified probe description. 12598 */ 12599 for (i = 0; i < enab->dten_ndesc; i++) { 12600 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12601 dtrace_probedesc_t *pd = &ep->dted_probe; 12602 12603 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 12604 continue; 12605 12606 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 12607 continue; 12608 12609 if (strcmp(pd->dtpd_func, match->dtpd_func)) 12610 continue; 12611 12612 if (strcmp(pd->dtpd_name, match->dtpd_name)) 12613 continue; 12614 12615 /* 12616 * We have a winning probe! Add it to our growing 12617 * enabling. 
			 */
			found = 1;
			dtrace_enabling_addlike(new, ep, create);
		}
	}

	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
		dtrace_enabling_destroy(new);
		return (err);
	}

	return (0);
}

static void
dtrace_enabling_retract(dtrace_state_t *state)
{
	dtrace_enabling_t *enab, *next;

	ASSERT(MUTEX_HELD(&dtrace_lock));

	/*
	 * Iterate over all retained enablings, destroying the enablings
	 * retained for the specified state.
	 */
	for (enab = dtrace_retained; enab != NULL; enab = next) {
		next = enab->dten_next;

		/*
		 * dtvs_state can only be NULL for helper enablings -- and
		 * helper enablings can't be retained.
		 */
		ASSERT(enab->dten_vstate->dtvs_state != NULL);

		if (enab->dten_vstate->dtvs_state == state) {
			ASSERT(state->dts_nretained > 0);
			dtrace_enabling_destroy(enab);
		}
	}

	ASSERT(state->dts_nretained == 0);
}

static int
dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
{
	int i = 0;
	int matched = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&dtrace_lock));

	for (i = 0; i < enab->dten_ndesc; i++) {
		dtrace_ecbdesc_t *ep = enab->dten_desc[i];

		enab->dten_current = ep;
		enab->dten_error = 0;

		matched += dtrace_probe_enable(&ep->dted_probe, enab);

		if (enab->dten_error != 0) {
			/*
			 * If we get an error half-way through enabling the
			 * probes, we kick out -- perhaps with some number of
			 * them enabled.  Leaving enabled probes enabled may
			 * be slightly confusing for user-level, but we expect
			 * that no one will attempt to actually drive on in
			 * the face of such errors.  If this is an anonymous
			 * enabling (indicated with a NULL nmatched pointer),
			 * we cmn_err() a message.  We aren't expecting to
			 * get such an error -- insofar as it can exist at
			 * all, it would be a result of corrupted DOF in the
			 * driver properties.
			 */
			if (nmatched == NULL) {
				cmn_err(CE_WARN, "dtrace_enabling_match() "
				    "error on %p: %d", (void *)ep,
				    enab->dten_error);
			}

			return (enab->dten_error);
		}
	}

	enab->dten_probegen = dtrace_probegen;
	if (nmatched != NULL)
		*nmatched = matched;

	return (0);
}

static void
dtrace_enabling_matchall(void)
{
	dtrace_enabling_t *enab;

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_lock);

	/*
	 * Iterate over all retained enablings to see if any probes match
	 * against them.  We only perform this operation on enablings for which
	 * we have sufficient permissions by virtue of being in the global zone
	 * or in the same zone as the DTrace client.  Because we can be called
	 * after dtrace_detach() has been called, we cannot assert that there
	 * are retained enablings.  We can safely load from dtrace_retained,
	 * however: the taskq_destroy() at the end of dtrace_detach() will
	 * block pending our completion.
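	 * (On illumos, the loop below additionally filters by zone: an
	 * enabling is matched only if we are in the global zone or in the
	 * same zone as the DTrace client that owns it.  The FreeBSD build
	 * compiles this check out.)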
	 */
	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
#ifdef illumos
		cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;

		if (INGLOBALZONE(curproc) ||
		    cr != NULL && getzoneid() == crgetzoneid(cr))
#endif
			(void) dtrace_enabling_match(enab, NULL);
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&cpu_lock);
}

/*
 * If an enabling is to be enabled without having matched probes (that is, if
 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
 * enabling must be _primed_ by creating an ECB for every ECB description.
 * This must be done to assure that we know the number of speculations, the
 * number of aggregations, the minimum buffer size needed, etc. before we
 * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
 * enabling any probes, we create ECBs for every ECB description, but with a
 * NULL probe -- which is exactly what this function does.
 */
static void
dtrace_enabling_prime(dtrace_state_t *state)
{
	dtrace_enabling_t *enab;
	int i;

	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
		ASSERT(enab->dten_vstate->dtvs_state != NULL);

		if (enab->dten_vstate->dtvs_state != state)
			continue;

		/*
		 * We don't want to prime an enabling more than once, lest
		 * we allow a malicious user to induce resource exhaustion.
		 * (The ECBs that result from priming an enabling aren't
		 * leaked -- but they also aren't deallocated until the
		 * consumer state is destroyed.)
		 */
		if (enab->dten_primed)
			continue;

		for (i = 0; i < enab->dten_ndesc; i++) {
			enab->dten_current = enab->dten_desc[i];
			(void) dtrace_probe_enable(NULL, enab);
		}

		enab->dten_primed = 1;
	}
}

/*
 * Called to indicate that probes should be provided due to retained
 * enablings.  This is implemented in terms of dtrace_probe_provide(), but it
 * must take an initial lap through the enabling, calling the dtps_provide()
 * entry point explicitly to allow for autocreated probes.
 */
static void
dtrace_enabling_provide(dtrace_provider_t *prv)
{
	int i, all = 0;
	dtrace_probedesc_t desc;
	dtrace_genid_t gen;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(MUTEX_HELD(&dtrace_provider_lock));

	if (prv == NULL) {
		all = 1;
		prv = dtrace_provider;
	}

	do {
		dtrace_enabling_t *enab;
		void *parg = prv->dtpv_arg;

retry:
		gen = dtrace_retained_gen;
		for (enab = dtrace_retained; enab != NULL;
		    enab = enab->dten_next) {
			for (i = 0; i < enab->dten_ndesc; i++) {
				desc = enab->dten_desc[i]->dted_probe;
				mutex_exit(&dtrace_lock);
				prv->dtpv_pops.dtps_provide(parg, &desc);
				mutex_enter(&dtrace_lock);
				/*
				 * Process the retained enablings again if
				 * they have changed while we weren't holding
				 * dtrace_lock.
				 */
				if (gen != dtrace_retained_gen)
					goto retry;
			}
		}
	} while (all && (prv = prv->dtpv_next) != NULL);

	mutex_exit(&dtrace_lock);
	dtrace_probe_provide(NULL, all ? NULL : prv);
	mutex_enter(&dtrace_lock);
}

/*
 * Called to reap ECBs that are attached to probes from defunct providers.
12834 */ 12835 static void 12836 dtrace_enabling_reap(void) 12837 { 12838 dtrace_provider_t *prov; 12839 dtrace_probe_t *probe; 12840 dtrace_ecb_t *ecb; 12841 hrtime_t when; 12842 int i; 12843 12844 mutex_enter(&cpu_lock); 12845 mutex_enter(&dtrace_lock); 12846 12847 for (i = 0; i < dtrace_nprobes; i++) { 12848 if ((probe = dtrace_probes[i]) == NULL) 12849 continue; 12850 12851 if (probe->dtpr_ecb == NULL) 12852 continue; 12853 12854 prov = probe->dtpr_provider; 12855 12856 if ((when = prov->dtpv_defunct) == 0) 12857 continue; 12858 12859 /* 12860 * We have ECBs on a defunct provider: we want to reap these 12861 * ECBs to allow the provider to unregister. The destruction 12862 * of these ECBs must be done carefully: if we destroy the ECB 12863 * and the consumer later wishes to consume an EPID that 12864 * corresponds to the destroyed ECB (and if the EPID metadata 12865 * has not been previously consumed), the consumer will abort 12866 * processing on the unknown EPID. To reduce (but not, sadly, 12867 * eliminate) the possibility of this, we will only destroy an 12868 * ECB for a defunct provider if, for the state that 12869 * corresponds to the ECB: 12870 * 12871 * (a) There is no speculative tracing (which can effectively 12872 * cache an EPID for an arbitrary amount of time). 12873 * 12874 * (b) The principal buffers have been switched twice since the 12875 * provider became defunct. 12876 * 12877 * (c) The aggregation buffers are of zero size or have been 12878 * switched twice since the provider became defunct. 12879 * 12880 * We use dts_speculates to determine (a) and call a function 12881 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 12882 * that as soon as we've been unable to destroy one of the ECBs 12883 * associated with the probe, we quit trying -- reaping is only 12884 * fruitful in as much as we can destroy all ECBs associated 12885 * with the defunct provider's probes. 12886 */ 12887 while ((ecb = probe->dtpr_ecb) != NULL) { 12888 dtrace_state_t *state = ecb->dte_state; 12889 dtrace_buffer_t *buf = state->dts_buffer; 12890 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 12891 12892 if (state->dts_speculates) 12893 break; 12894 12895 if (!dtrace_buffer_consumed(buf, when)) 12896 break; 12897 12898 if (!dtrace_buffer_consumed(aggbuf, when)) 12899 break; 12900 12901 dtrace_ecb_disable(ecb); 12902 ASSERT(probe->dtpr_ecb != ecb); 12903 dtrace_ecb_destroy(ecb); 12904 } 12905 } 12906 12907 mutex_exit(&dtrace_lock); 12908 mutex_exit(&cpu_lock); 12909 } 12910 12911 /* 12912 * DTrace DOF Functions 12913 */ 12914 /*ARGSUSED*/ 12915 static void 12916 dtrace_dof_error(dof_hdr_t *dof, const char *str) 12917 { 12918 if (dtrace_err_verbose) 12919 cmn_err(CE_WARN, "failed to process DOF: %s", str); 12920 12921 #ifdef DTRACE_ERRDEBUG 12922 dtrace_errdebug(str); 12923 #endif 12924 } 12925 12926 /* 12927 * Create DOF out of a currently enabled state. Right now, we only create 12928 * DOF containing the run-time options -- but this could be expanded to create 12929 * complete DOF representing the enabled state. 
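 *
 * The generated image is laid out as follows (offsets as computed below):
 *
 *	+------------------------+  0
 *	| dof_hdr_t              |
 *	+------------------------+  dofh_secoff == sizeof (dof_hdr_t)
 *	| dof_sec_t (OPTDESC)    |
 *	+------------------------+  dofs_offset (8-byte aligned)
 *	| dof_optdesc_t array of |
 *	| DTRACEOPT_MAX entries  |
 *	+------------------------+  dofh_loadsz == dofh_filesz == len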
12930 */ 12931 static dof_hdr_t * 12932 dtrace_dof_create(dtrace_state_t *state) 12933 { 12934 dof_hdr_t *dof; 12935 dof_sec_t *sec; 12936 dof_optdesc_t *opt; 12937 int i, len = sizeof (dof_hdr_t) + 12938 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 12939 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 12940 12941 ASSERT(MUTEX_HELD(&dtrace_lock)); 12942 12943 dof = kmem_zalloc(len, KM_SLEEP); 12944 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 12945 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 12946 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 12947 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 12948 12949 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 12950 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 12951 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 12952 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 12953 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 12954 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 12955 12956 dof->dofh_flags = 0; 12957 dof->dofh_hdrsize = sizeof (dof_hdr_t); 12958 dof->dofh_secsize = sizeof (dof_sec_t); 12959 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 12960 dof->dofh_secoff = sizeof (dof_hdr_t); 12961 dof->dofh_loadsz = len; 12962 dof->dofh_filesz = len; 12963 dof->dofh_pad = 0; 12964 12965 /* 12966 * Fill in the option section header... 12967 */ 12968 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 12969 sec->dofs_type = DOF_SECT_OPTDESC; 12970 sec->dofs_align = sizeof (uint64_t); 12971 sec->dofs_flags = DOF_SECF_LOAD; 12972 sec->dofs_entsize = sizeof (dof_optdesc_t); 12973 12974 opt = (dof_optdesc_t *)((uintptr_t)sec + 12975 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 12976 12977 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 12978 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 12979 12980 for (i = 0; i < DTRACEOPT_MAX; i++) { 12981 opt[i].dofo_option = i; 12982 opt[i].dofo_strtab = DOF_SECIDX_NONE; 12983 opt[i].dofo_value = state->dts_options[i]; 12984 } 12985 12986 return (dof); 12987 } 12988 12989 static dof_hdr_t * 12990 dtrace_dof_copyin(uintptr_t uarg, int *errp) 12991 { 12992 dof_hdr_t hdr, *dof; 12993 12994 ASSERT(!MUTEX_HELD(&dtrace_lock)); 12995 12996 /* 12997 * First, we're going to copyin() the sizeof (dof_hdr_t). 12998 */ 12999 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 13000 dtrace_dof_error(NULL, "failed to copyin DOF header"); 13001 *errp = EFAULT; 13002 return (NULL); 13003 } 13004 13005 /* 13006 * Now we'll allocate the entire DOF and copy it in -- provided 13007 * that the length isn't outrageous. 
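	 *
	 * Note that the header copied in above is only advisory: userland
	 * could rewrite it between our two copyin() calls.  This is why we
	 * re-check below that dofh_loadsz in the full copy still matches
	 * the header that we validated, and fail with EFAULT if it does
	 * not.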
13008 */ 13009 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 13010 dtrace_dof_error(&hdr, "load size exceeds maximum"); 13011 *errp = E2BIG; 13012 return (NULL); 13013 } 13014 13015 if (hdr.dofh_loadsz < sizeof (hdr)) { 13016 dtrace_dof_error(&hdr, "invalid load size"); 13017 *errp = EINVAL; 13018 return (NULL); 13019 } 13020 13021 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 13022 13023 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 13024 dof->dofh_loadsz != hdr.dofh_loadsz) { 13025 kmem_free(dof, hdr.dofh_loadsz); 13026 *errp = EFAULT; 13027 return (NULL); 13028 } 13029 13030 return (dof); 13031 } 13032 13033 #ifndef illumos 13034 static __inline uchar_t 13035 dtrace_dof_char(char c) { 13036 switch (c) { 13037 case '0': 13038 case '1': 13039 case '2': 13040 case '3': 13041 case '4': 13042 case '5': 13043 case '6': 13044 case '7': 13045 case '8': 13046 case '9': 13047 return (c - '0'); 13048 case 'A': 13049 case 'B': 13050 case 'C': 13051 case 'D': 13052 case 'E': 13053 case 'F': 13054 return (c - 'A' + 10); 13055 case 'a': 13056 case 'b': 13057 case 'c': 13058 case 'd': 13059 case 'e': 13060 case 'f': 13061 return (c - 'a' + 10); 13062 } 13063 /* Should not reach here. */ 13064 return (0); 13065 } 13066 #endif 13067 13068 static dof_hdr_t * 13069 dtrace_dof_property(const char *name) 13070 { 13071 uchar_t *buf; 13072 uint64_t loadsz; 13073 unsigned int len, i; 13074 dof_hdr_t *dof; 13075 13076 #ifdef illumos 13077 /* 13078 * Unfortunately, array of values in .conf files are always (and 13079 * only) interpreted to be integer arrays. We must read our DOF 13080 * as an integer array, and then squeeze it into a byte array. 13081 */ 13082 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 13083 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 13084 return (NULL); 13085 13086 for (i = 0; i < len; i++) 13087 buf[i] = (uchar_t)(((int *)buf)[i]); 13088 13089 if (len < sizeof (dof_hdr_t)) { 13090 ddi_prop_free(buf); 13091 dtrace_dof_error(NULL, "truncated header"); 13092 return (NULL); 13093 } 13094 13095 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 13096 ddi_prop_free(buf); 13097 dtrace_dof_error(NULL, "truncated DOF"); 13098 return (NULL); 13099 } 13100 13101 if (loadsz >= dtrace_dof_maxsize) { 13102 ddi_prop_free(buf); 13103 dtrace_dof_error(NULL, "oversized DOF"); 13104 return (NULL); 13105 } 13106 13107 dof = kmem_alloc(loadsz, KM_SLEEP); 13108 bcopy(buf, dof, loadsz); 13109 ddi_prop_free(buf); 13110 #else 13111 char *p; 13112 char *p_env; 13113 13114 if ((p_env = kern_getenv(name)) == NULL) 13115 return (NULL); 13116 13117 len = strlen(p_env) / 2; 13118 13119 buf = kmem_alloc(len, KM_SLEEP); 13120 13121 dof = (dof_hdr_t *) buf; 13122 13123 p = p_env; 13124 13125 for (i = 0; i < len; i++) { 13126 buf[i] = (dtrace_dof_char(p[0]) << 4) | 13127 dtrace_dof_char(p[1]); 13128 p += 2; 13129 } 13130 13131 freeenv(p_env); 13132 13133 if (len < sizeof (dof_hdr_t)) { 13134 kmem_free(buf, 0); 13135 dtrace_dof_error(NULL, "truncated header"); 13136 return (NULL); 13137 } 13138 13139 if (len < (loadsz = dof->dofh_loadsz)) { 13140 kmem_free(buf, 0); 13141 dtrace_dof_error(NULL, "truncated DOF"); 13142 return (NULL); 13143 } 13144 13145 if (loadsz >= dtrace_dof_maxsize) { 13146 kmem_free(buf, 0); 13147 dtrace_dof_error(NULL, "oversized DOF"); 13148 return (NULL); 13149 } 13150 #endif 13151 13152 return (dof); 13153 } 13154 13155 static void 13156 dtrace_dof_destroy(dof_hdr_t *dof) 13157 { 13158 kmem_free(dof, dof->dofh_loadsz); 13159 } 13160 13161 /* 13162 * Return the 
dof_sec_t pointer corresponding to a given section index. If the 13163 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 13164 * a type other than DOF_SECT_NONE is specified, the header is checked against 13165 * this type and NULL is returned if the types do not match. 13166 */ 13167 static dof_sec_t * 13168 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 13169 { 13170 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 13171 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 13172 13173 if (i >= dof->dofh_secnum) { 13174 dtrace_dof_error(dof, "referenced section index is invalid"); 13175 return (NULL); 13176 } 13177 13178 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 13179 dtrace_dof_error(dof, "referenced section is not loadable"); 13180 return (NULL); 13181 } 13182 13183 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 13184 dtrace_dof_error(dof, "referenced section is the wrong type"); 13185 return (NULL); 13186 } 13187 13188 return (sec); 13189 } 13190 13191 static dtrace_probedesc_t * 13192 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 13193 { 13194 dof_probedesc_t *probe; 13195 dof_sec_t *strtab; 13196 uintptr_t daddr = (uintptr_t)dof; 13197 uintptr_t str; 13198 size_t size; 13199 13200 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 13201 dtrace_dof_error(dof, "invalid probe section"); 13202 return (NULL); 13203 } 13204 13205 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13206 dtrace_dof_error(dof, "bad alignment in probe description"); 13207 return (NULL); 13208 } 13209 13210 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 13211 dtrace_dof_error(dof, "truncated probe description"); 13212 return (NULL); 13213 } 13214 13215 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 13216 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 13217 13218 if (strtab == NULL) 13219 return (NULL); 13220 13221 str = daddr + strtab->dofs_offset; 13222 size = strtab->dofs_size; 13223 13224 if (probe->dofp_provider >= strtab->dofs_size) { 13225 dtrace_dof_error(dof, "corrupt probe provider"); 13226 return (NULL); 13227 } 13228 13229 (void) strncpy(desc->dtpd_provider, 13230 (char *)(str + probe->dofp_provider), 13231 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 13232 13233 if (probe->dofp_mod >= strtab->dofs_size) { 13234 dtrace_dof_error(dof, "corrupt probe module"); 13235 return (NULL); 13236 } 13237 13238 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 13239 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 13240 13241 if (probe->dofp_func >= strtab->dofs_size) { 13242 dtrace_dof_error(dof, "corrupt probe function"); 13243 return (NULL); 13244 } 13245 13246 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 13247 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 13248 13249 if (probe->dofp_name >= strtab->dofs_size) { 13250 dtrace_dof_error(dof, "corrupt probe name"); 13251 return (NULL); 13252 } 13253 13254 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 13255 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 13256 13257 return (desc); 13258 } 13259 13260 static dtrace_difo_t * 13261 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13262 cred_t *cr) 13263 { 13264 dtrace_difo_t *dp; 13265 size_t ttl = 0; 13266 dof_difohdr_t *dofd; 13267 uintptr_t daddr = (uintptr_t)dof; 13268 size_t max = dtrace_difo_maxsize; 13269 int i, l, n; 13270 13271 static const struct { 13272 int 
section; 13273 int bufoffs; 13274 int lenoffs; 13275 int entsize; 13276 int align; 13277 const char *msg; 13278 } difo[] = { 13279 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 13280 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 13281 sizeof (dif_instr_t), "multiple DIF sections" }, 13282 13283 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 13284 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 13285 sizeof (uint64_t), "multiple integer tables" }, 13286 13287 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 13288 offsetof(dtrace_difo_t, dtdo_strlen), 0, 13289 sizeof (char), "multiple string tables" }, 13290 13291 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 13292 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 13293 sizeof (uint_t), "multiple variable tables" }, 13294 13295 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 13296 }; 13297 13298 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 13299 dtrace_dof_error(dof, "invalid DIFO header section"); 13300 return (NULL); 13301 } 13302 13303 if (sec->dofs_align != sizeof (dof_secidx_t)) { 13304 dtrace_dof_error(dof, "bad alignment in DIFO header"); 13305 return (NULL); 13306 } 13307 13308 if (sec->dofs_size < sizeof (dof_difohdr_t) || 13309 sec->dofs_size % sizeof (dof_secidx_t)) { 13310 dtrace_dof_error(dof, "bad size in DIFO header"); 13311 return (NULL); 13312 } 13313 13314 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13315 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 13316 13317 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 13318 dp->dtdo_rtype = dofd->dofd_rtype; 13319 13320 for (l = 0; l < n; l++) { 13321 dof_sec_t *subsec; 13322 void **bufp; 13323 uint32_t *lenp; 13324 13325 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 13326 dofd->dofd_links[l])) == NULL) 13327 goto err; /* invalid section link */ 13328 13329 if (ttl + subsec->dofs_size > max) { 13330 dtrace_dof_error(dof, "exceeds maximum size"); 13331 goto err; 13332 } 13333 13334 ttl += subsec->dofs_size; 13335 13336 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 13337 if (subsec->dofs_type != difo[i].section) 13338 continue; 13339 13340 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 13341 dtrace_dof_error(dof, "section not loaded"); 13342 goto err; 13343 } 13344 13345 if (subsec->dofs_align != difo[i].align) { 13346 dtrace_dof_error(dof, "bad alignment"); 13347 goto err; 13348 } 13349 13350 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 13351 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 13352 13353 if (*bufp != NULL) { 13354 dtrace_dof_error(dof, difo[i].msg); 13355 goto err; 13356 } 13357 13358 if (difo[i].entsize != subsec->dofs_entsize) { 13359 dtrace_dof_error(dof, "entry size mismatch"); 13360 goto err; 13361 } 13362 13363 if (subsec->dofs_entsize != 0 && 13364 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 13365 dtrace_dof_error(dof, "corrupt entry size"); 13366 goto err; 13367 } 13368 13369 *lenp = subsec->dofs_size; 13370 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 13371 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 13372 *bufp, subsec->dofs_size); 13373 13374 if (subsec->dofs_entsize != 0) 13375 *lenp /= subsec->dofs_entsize; 13376 13377 break; 13378 } 13379 13380 /* 13381 * If we encounter a loadable DIFO sub-section that is not 13382 * known to us, assume this is a broken program and fail. 
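		 *
		 * (Unrecognized sections that are not loadable are
		 * tolerated, since they cannot affect what we execute.)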
13383 */ 13384 if (difo[i].section == DOF_SECT_NONE && 13385 (subsec->dofs_flags & DOF_SECF_LOAD)) { 13386 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 13387 goto err; 13388 } 13389 } 13390 13391 if (dp->dtdo_buf == NULL) { 13392 /* 13393 * We can't have a DIF object without DIF text. 13394 */ 13395 dtrace_dof_error(dof, "missing DIF text"); 13396 goto err; 13397 } 13398 13399 /* 13400 * Before we validate the DIF object, run through the variable table 13401 * looking for the strings -- if any of their size are under, we'll set 13402 * their size to be the system-wide default string size. Note that 13403 * this should _not_ happen if the "strsize" option has been set -- 13404 * in this case, the compiler should have set the size to reflect the 13405 * setting of the option. 13406 */ 13407 for (i = 0; i < dp->dtdo_varlen; i++) { 13408 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 13409 dtrace_diftype_t *t = &v->dtdv_type; 13410 13411 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 13412 continue; 13413 13414 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 13415 t->dtdt_size = dtrace_strsize_default; 13416 } 13417 13418 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 13419 goto err; 13420 13421 dtrace_difo_init(dp, vstate); 13422 return (dp); 13423 13424 err: 13425 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 13426 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 13427 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 13428 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 13429 13430 kmem_free(dp, sizeof (dtrace_difo_t)); 13431 return (NULL); 13432 } 13433 13434 static dtrace_predicate_t * 13435 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13436 cred_t *cr) 13437 { 13438 dtrace_difo_t *dp; 13439 13440 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 13441 return (NULL); 13442 13443 return (dtrace_predicate_create(dp)); 13444 } 13445 13446 static dtrace_actdesc_t * 13447 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13448 cred_t *cr) 13449 { 13450 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 13451 dof_actdesc_t *desc; 13452 dof_sec_t *difosec; 13453 size_t offs; 13454 uintptr_t daddr = (uintptr_t)dof; 13455 uint64_t arg; 13456 dtrace_actkind_t kind; 13457 13458 if (sec->dofs_type != DOF_SECT_ACTDESC) { 13459 dtrace_dof_error(dof, "invalid action section"); 13460 return (NULL); 13461 } 13462 13463 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 13464 dtrace_dof_error(dof, "truncated action description"); 13465 return (NULL); 13466 } 13467 13468 if (sec->dofs_align != sizeof (uint64_t)) { 13469 dtrace_dof_error(dof, "bad alignment in action description"); 13470 return (NULL); 13471 } 13472 13473 if (sec->dofs_size < sec->dofs_entsize) { 13474 dtrace_dof_error(dof, "section entry size exceeds total size"); 13475 return (NULL); 13476 } 13477 13478 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 13479 dtrace_dof_error(dof, "bad entry size in action description"); 13480 return (NULL); 13481 } 13482 13483 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 13484 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 13485 return (NULL); 13486 } 13487 13488 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 13489 desc = (dof_actdesc_t *)(daddr + 13490 (uintptr_t)sec->dofs_offset + offs); 13491 kind = (dtrace_actkind_t)desc->dofa_kind; 13492 13493 if ((DTRACEACT_ISPRINTFLIKE(kind) && 13494 (kind 
!= DTRACEACT_PRINTA || 13495 desc->dofa_strtab != DOF_SECIDX_NONE)) || 13496 (kind == DTRACEACT_DIFEXPR && 13497 desc->dofa_strtab != DOF_SECIDX_NONE)) { 13498 dof_sec_t *strtab; 13499 char *str, *fmt; 13500 uint64_t i; 13501 13502 /* 13503 * The argument to these actions is an index into the 13504 * DOF string table. For printf()-like actions, this 13505 * is the format string. For print(), this is the 13506 * CTF type of the expression result. 13507 */ 13508 if ((strtab = dtrace_dof_sect(dof, 13509 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 13510 goto err; 13511 13512 str = (char *)((uintptr_t)dof + 13513 (uintptr_t)strtab->dofs_offset); 13514 13515 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 13516 if (str[i] == '\0') 13517 break; 13518 } 13519 13520 if (i >= strtab->dofs_size) { 13521 dtrace_dof_error(dof, "bogus format string"); 13522 goto err; 13523 } 13524 13525 if (i == desc->dofa_arg) { 13526 dtrace_dof_error(dof, "empty format string"); 13527 goto err; 13528 } 13529 13530 i -= desc->dofa_arg; 13531 fmt = kmem_alloc(i + 1, KM_SLEEP); 13532 bcopy(&str[desc->dofa_arg], fmt, i + 1); 13533 arg = (uint64_t)(uintptr_t)fmt; 13534 } else { 13535 if (kind == DTRACEACT_PRINTA) { 13536 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 13537 arg = 0; 13538 } else { 13539 arg = desc->dofa_arg; 13540 } 13541 } 13542 13543 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 13544 desc->dofa_uarg, arg); 13545 13546 if (last != NULL) { 13547 last->dtad_next = act; 13548 } else { 13549 first = act; 13550 } 13551 13552 last = act; 13553 13554 if (desc->dofa_difo == DOF_SECIDX_NONE) 13555 continue; 13556 13557 if ((difosec = dtrace_dof_sect(dof, 13558 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 13559 goto err; 13560 13561 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 13562 13563 if (act->dtad_difo == NULL) 13564 goto err; 13565 } 13566 13567 ASSERT(first != NULL); 13568 return (first); 13569 13570 err: 13571 for (act = first; act != NULL; act = next) { 13572 next = act->dtad_next; 13573 dtrace_actdesc_release(act, vstate); 13574 } 13575 13576 return (NULL); 13577 } 13578 13579 static dtrace_ecbdesc_t * 13580 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 13581 cred_t *cr) 13582 { 13583 dtrace_ecbdesc_t *ep; 13584 dof_ecbdesc_t *ecb; 13585 dtrace_probedesc_t *desc; 13586 dtrace_predicate_t *pred = NULL; 13587 13588 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 13589 dtrace_dof_error(dof, "truncated ECB description"); 13590 return (NULL); 13591 } 13592 13593 if (sec->dofs_align != sizeof (uint64_t)) { 13594 dtrace_dof_error(dof, "bad alignment in ECB description"); 13595 return (NULL); 13596 } 13597 13598 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 13599 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 13600 13601 if (sec == NULL) 13602 return (NULL); 13603 13604 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 13605 ep->dted_uarg = ecb->dofe_uarg; 13606 desc = &ep->dted_probe; 13607 13608 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 13609 goto err; 13610 13611 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 13612 if ((sec = dtrace_dof_sect(dof, 13613 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 13614 goto err; 13615 13616 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 13617 goto err; 13618 13619 ep->dted_pred.dtpdd_predicate = pred; 13620 } 13621 13622 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 13623 if ((sec = dtrace_dof_sect(dof, 13624 DOF_SECT_ACTDESC, ecb->dofe_actions)) == 
NULL) 13625 goto err; 13626 13627 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 13628 13629 if (ep->dted_action == NULL) 13630 goto err; 13631 } 13632 13633 return (ep); 13634 13635 err: 13636 if (pred != NULL) 13637 dtrace_predicate_release(pred, vstate); 13638 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 13639 return (NULL); 13640 } 13641 13642 /* 13643 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 13644 * specified DOF. At present, this amounts to simply adding 'ubase' to the 13645 * site of any user SETX relocations to account for load object base address. 13646 * In the future, if we need other relocations, this function can be extended. 13647 */ 13648 static int 13649 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 13650 { 13651 uintptr_t daddr = (uintptr_t)dof; 13652 dof_relohdr_t *dofr = 13653 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 13654 dof_sec_t *ss, *rs, *ts; 13655 dof_relodesc_t *r; 13656 uint_t i, n; 13657 13658 if (sec->dofs_size < sizeof (dof_relohdr_t) || 13659 sec->dofs_align != sizeof (dof_secidx_t)) { 13660 dtrace_dof_error(dof, "invalid relocation header"); 13661 return (-1); 13662 } 13663 13664 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 13665 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 13666 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 13667 13668 if (ss == NULL || rs == NULL || ts == NULL) 13669 return (-1); /* dtrace_dof_error() has been called already */ 13670 13671 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 13672 rs->dofs_align != sizeof (uint64_t)) { 13673 dtrace_dof_error(dof, "invalid relocation section"); 13674 return (-1); 13675 } 13676 13677 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 13678 n = rs->dofs_size / rs->dofs_entsize; 13679 13680 for (i = 0; i < n; i++) { 13681 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 13682 13683 switch (r->dofr_type) { 13684 case DOF_RELO_NONE: 13685 break; 13686 case DOF_RELO_SETX: 13687 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 13688 sizeof (uint64_t) > ts->dofs_size) { 13689 dtrace_dof_error(dof, "bad relocation offset"); 13690 return (-1); 13691 } 13692 13693 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 13694 dtrace_dof_error(dof, "misaligned setx relo"); 13695 return (-1); 13696 } 13697 13698 *(uint64_t *)taddr += ubase; 13699 break; 13700 default: 13701 dtrace_dof_error(dof, "invalid relocation type"); 13702 return (-1); 13703 } 13704 13705 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 13706 } 13707 13708 return (0); 13709 } 13710 13711 /* 13712 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 13713 * header: it should be at the front of a memory region that is at least 13714 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 13715 * size. It need not be validated in any other way. 13716 */ 13717 static int 13718 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 13719 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 13720 { 13721 uint64_t len = dof->dofh_loadsz, seclen; 13722 uintptr_t daddr = (uintptr_t)dof; 13723 dtrace_ecbdesc_t *ep; 13724 dtrace_enabling_t *enab; 13725 uint_t i; 13726 13727 ASSERT(MUTEX_HELD(&dtrace_lock)); 13728 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 13729 13730 /* 13731 * Check the DOF header identification bytes. 
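	 * These are the bytes of the dofh_ident[] array, which encode the
	 * magic string, the data model and encoding, the DOF and DIF
	 * versions, and the DIF register counts, each validated in turn
	 * below.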
In addition to checking 13732 * valid settings, we also verify that unused bits/bytes are zeroed so 13733 * we can use them later without fear of regressing existing binaries. 13734 */ 13735 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 13736 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 13737 dtrace_dof_error(dof, "DOF magic string mismatch"); 13738 return (-1); 13739 } 13740 13741 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 13742 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 13743 dtrace_dof_error(dof, "DOF has invalid data model"); 13744 return (-1); 13745 } 13746 13747 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 13748 dtrace_dof_error(dof, "DOF encoding mismatch"); 13749 return (-1); 13750 } 13751 13752 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13753 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 13754 dtrace_dof_error(dof, "DOF version mismatch"); 13755 return (-1); 13756 } 13757 13758 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 13759 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 13760 return (-1); 13761 } 13762 13763 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 13764 dtrace_dof_error(dof, "DOF uses too many integer registers"); 13765 return (-1); 13766 } 13767 13768 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 13769 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 13770 return (-1); 13771 } 13772 13773 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 13774 if (dof->dofh_ident[i] != 0) { 13775 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 13776 return (-1); 13777 } 13778 } 13779 13780 if (dof->dofh_flags & ~DOF_FL_VALID) { 13781 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 13782 return (-1); 13783 } 13784 13785 if (dof->dofh_secsize == 0) { 13786 dtrace_dof_error(dof, "zero section header size"); 13787 return (-1); 13788 } 13789 13790 /* 13791 * Check that the section headers don't exceed the amount of DOF 13792 * data. Note that we cast the section size and number of sections 13793 * to uint64_t's to prevent possible overflow in the multiplication. 13794 */ 13795 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 13796 13797 if (dof->dofh_secoff > len || seclen > len || 13798 dof->dofh_secoff + seclen > len) { 13799 dtrace_dof_error(dof, "truncated section headers"); 13800 return (-1); 13801 } 13802 13803 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 13804 dtrace_dof_error(dof, "misaligned section headers"); 13805 return (-1); 13806 } 13807 13808 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 13809 dtrace_dof_error(dof, "misaligned section size"); 13810 return (-1); 13811 } 13812 13813 /* 13814 * Take an initial pass through the section headers to be sure that 13815 * the headers don't have stray offsets. If the 'noprobes' flag is 13816 * set, do not permit sections relating to providers, probes, or args. 
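	 *
	 * (Two more passes follow: one to apply any user relocations found
	 * in DOF_SECT_URELHDR sections, and one to build the enabling from
	 * the DOF_SECT_ECBDESC sections.  Each pass relies on the
	 * invariants established by the one before it.)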
13817 */ 13818 for (i = 0; i < dof->dofh_secnum; i++) { 13819 dof_sec_t *sec = (dof_sec_t *)(daddr + 13820 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13821 13822 if (noprobes) { 13823 switch (sec->dofs_type) { 13824 case DOF_SECT_PROVIDER: 13825 case DOF_SECT_PROBES: 13826 case DOF_SECT_PRARGS: 13827 case DOF_SECT_PROFFS: 13828 dtrace_dof_error(dof, "illegal sections " 13829 "for enabling"); 13830 return (-1); 13831 } 13832 } 13833 13834 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 13835 !(sec->dofs_flags & DOF_SECF_LOAD)) { 13836 dtrace_dof_error(dof, "loadable section with load " 13837 "flag unset"); 13838 return (-1); 13839 } 13840 13841 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 13842 continue; /* just ignore non-loadable sections */ 13843 13844 if (!ISP2(sec->dofs_align)) { 13845 dtrace_dof_error(dof, "bad section alignment"); 13846 return (-1); 13847 } 13848 13849 if (sec->dofs_offset & (sec->dofs_align - 1)) { 13850 dtrace_dof_error(dof, "misaligned section"); 13851 return (-1); 13852 } 13853 13854 if (sec->dofs_offset > len || sec->dofs_size > len || 13855 sec->dofs_offset + sec->dofs_size > len) { 13856 dtrace_dof_error(dof, "corrupt section header"); 13857 return (-1); 13858 } 13859 13860 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 13861 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 13862 dtrace_dof_error(dof, "non-terminating string table"); 13863 return (-1); 13864 } 13865 } 13866 13867 /* 13868 * Take a second pass through the sections and locate and perform any 13869 * relocations that are present. We do this after the first pass to 13870 * be sure that all sections have had their headers validated. 13871 */ 13872 for (i = 0; i < dof->dofh_secnum; i++) { 13873 dof_sec_t *sec = (dof_sec_t *)(daddr + 13874 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13875 13876 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 13877 continue; /* skip sections that are not loadable */ 13878 13879 switch (sec->dofs_type) { 13880 case DOF_SECT_URELHDR: 13881 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 13882 return (-1); 13883 break; 13884 } 13885 } 13886 13887 if ((enab = *enabp) == NULL) 13888 enab = *enabp = dtrace_enabling_create(vstate); 13889 13890 for (i = 0; i < dof->dofh_secnum; i++) { 13891 dof_sec_t *sec = (dof_sec_t *)(daddr + 13892 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13893 13894 if (sec->dofs_type != DOF_SECT_ECBDESC) 13895 continue; 13896 13897 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 13898 dtrace_enabling_destroy(enab); 13899 *enabp = NULL; 13900 return (-1); 13901 } 13902 13903 dtrace_enabling_add(enab, ep); 13904 } 13905 13906 return (0); 13907 } 13908 13909 /* 13910 * Process DOF for any options. This routine assumes that the DOF has been 13911 * at least processed by dtrace_dof_slurp(). 
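 *
 * Each DOF_SECT_OPTDESC section is an array of dof_optdesc_t records; a
 * consumer setting a buffer size might, illustratively, hand us a record
 * with:
 *
 *	dofo_option = DTRACEOPT_BUFSIZE;
 *	dofo_strtab = DOF_SECIDX_NONE;
 *	dofo_value  = 4 * 1024 * 1024;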
13912 */ 13913 static int 13914 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 13915 { 13916 int i, rval; 13917 uint32_t entsize; 13918 size_t offs; 13919 dof_optdesc_t *desc; 13920 13921 for (i = 0; i < dof->dofh_secnum; i++) { 13922 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 13923 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 13924 13925 if (sec->dofs_type != DOF_SECT_OPTDESC) 13926 continue; 13927 13928 if (sec->dofs_align != sizeof (uint64_t)) { 13929 dtrace_dof_error(dof, "bad alignment in " 13930 "option description"); 13931 return (EINVAL); 13932 } 13933 13934 if ((entsize = sec->dofs_entsize) == 0) { 13935 dtrace_dof_error(dof, "zeroed option entry size"); 13936 return (EINVAL); 13937 } 13938 13939 if (entsize < sizeof (dof_optdesc_t)) { 13940 dtrace_dof_error(dof, "bad option entry size"); 13941 return (EINVAL); 13942 } 13943 13944 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 13945 desc = (dof_optdesc_t *)((uintptr_t)dof + 13946 (uintptr_t)sec->dofs_offset + offs); 13947 13948 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 13949 dtrace_dof_error(dof, "non-zero option string"); 13950 return (EINVAL); 13951 } 13952 13953 if (desc->dofo_value == DTRACEOPT_UNSET) { 13954 dtrace_dof_error(dof, "unset option"); 13955 return (EINVAL); 13956 } 13957 13958 if ((rval = dtrace_state_option(state, 13959 desc->dofo_option, desc->dofo_value)) != 0) { 13960 dtrace_dof_error(dof, "rejected option"); 13961 return (rval); 13962 } 13963 } 13964 } 13965 13966 return (0); 13967 } 13968 13969 /* 13970 * DTrace Consumer State Functions 13971 */ 13972 static int 13973 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 13974 { 13975 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 13976 void *base; 13977 uintptr_t limit; 13978 dtrace_dynvar_t *dvar, *next, *start; 13979 int i; 13980 13981 ASSERT(MUTEX_HELD(&dtrace_lock)); 13982 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 13983 13984 bzero(dstate, sizeof (dtrace_dstate_t)); 13985 13986 if ((dstate->dtds_chunksize = chunksize) == 0) 13987 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 13988 13989 VERIFY(dstate->dtds_chunksize < LONG_MAX); 13990 13991 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 13992 size = min; 13993 13994 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) 13995 return (ENOMEM); 13996 13997 dstate->dtds_size = size; 13998 dstate->dtds_base = base; 13999 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 14000 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 14001 14002 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 14003 14004 if (hashsize != 1 && (hashsize & 1)) 14005 hashsize--; 14006 14007 dstate->dtds_hashsize = hashsize; 14008 dstate->dtds_hash = dstate->dtds_base; 14009 14010 /* 14011 * Set all of our hash buckets to point to the single sink, and (if 14012 * it hasn't already been set), set the sink's hash value to be the 14013 * sink sentinel value. The sink is needed for dynamic variable 14014 * lookups to know that they have iterated over an entire, valid hash 14015 * chain. 14016 */ 14017 for (i = 0; i < hashsize; i++) 14018 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 14019 14020 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 14021 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 14022 14023 /* 14024 * Determine number of active CPUs. Divide free list evenly among 14025 * active CPUs. 
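	 *
	 * Concretely: each CPU's free list receives maxper bytes (the
	 * space remaining after the hash table, divided by NCPU and
	 * rounded down to a multiple of the chunk size); the final CPU
	 * absorbs any remainder, and if maxper rounds down to zero the
	 * entire space goes to the first CPU instead.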
14026 */ 14027 start = (dtrace_dynvar_t *) 14028 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 14029 limit = (uintptr_t)base + size; 14030 14031 VERIFY((uintptr_t)start < limit); 14032 VERIFY((uintptr_t)start >= (uintptr_t)base); 14033 14034 maxper = (limit - (uintptr_t)start) / NCPU; 14035 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 14036 14037 #ifndef illumos 14038 CPU_FOREACH(i) { 14039 #else 14040 for (i = 0; i < NCPU; i++) { 14041 #endif 14042 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 14043 14044 /* 14045 * If we don't even have enough chunks to make it once through 14046 * NCPUs, we're just going to allocate everything to the first 14047 * CPU. And if we're on the last CPU, we're going to allocate 14048 * whatever is left over. In either case, we set the limit to 14049 * be the limit of the dynamic variable space. 14050 */ 14051 if (maxper == 0 || i == NCPU - 1) { 14052 limit = (uintptr_t)base + size; 14053 start = NULL; 14054 } else { 14055 limit = (uintptr_t)start + maxper; 14056 start = (dtrace_dynvar_t *)limit; 14057 } 14058 14059 VERIFY(limit <= (uintptr_t)base + size); 14060 14061 for (;;) { 14062 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 14063 dstate->dtds_chunksize); 14064 14065 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 14066 break; 14067 14068 VERIFY((uintptr_t)dvar >= (uintptr_t)base && 14069 (uintptr_t)dvar <= (uintptr_t)base + size); 14070 dvar->dtdv_next = next; 14071 dvar = next; 14072 } 14073 14074 if (maxper == 0) 14075 break; 14076 } 14077 14078 return (0); 14079 } 14080 14081 static void 14082 dtrace_dstate_fini(dtrace_dstate_t *dstate) 14083 { 14084 ASSERT(MUTEX_HELD(&cpu_lock)); 14085 14086 if (dstate->dtds_base == NULL) 14087 return; 14088 14089 kmem_free(dstate->dtds_base, dstate->dtds_size); 14090 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 14091 } 14092 14093 static void 14094 dtrace_vstate_fini(dtrace_vstate_t *vstate) 14095 { 14096 /* 14097 * Logical XOR, where are you? 14098 */ 14099 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 14100 14101 if (vstate->dtvs_nglobals > 0) { 14102 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 14103 sizeof (dtrace_statvar_t *)); 14104 } 14105 14106 if (vstate->dtvs_ntlocals > 0) { 14107 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 14108 sizeof (dtrace_difv_t)); 14109 } 14110 14111 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 14112 14113 if (vstate->dtvs_nlocals > 0) { 14114 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 14115 sizeof (dtrace_statvar_t *)); 14116 } 14117 } 14118 14119 #ifdef illumos 14120 static void 14121 dtrace_state_clean(dtrace_state_t *state) 14122 { 14123 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14124 return; 14125 14126 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14127 dtrace_speculation_clean(state); 14128 } 14129 14130 static void 14131 dtrace_state_deadman(dtrace_state_t *state) 14132 { 14133 hrtime_t now; 14134 14135 dtrace_sync(); 14136 14137 now = dtrace_gethrtime(); 14138 14139 if (state != dtrace_anon.dta_state && 14140 now - state->dts_laststatus >= dtrace_deadman_user) 14141 return; 14142 14143 /* 14144 * We must be sure that dts_alive never appears to be less than the 14145 * value upon entry to dtrace_state_deadman(), and because we lack a 14146 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14147 * store INT64_MAX to it, followed by a memory barrier, followed by 14148 * the new value. 
This assures that dts_alive never appears to be 14149 * less than its true value, regardless of the order in which the 14150 * stores to the underlying storage are issued. 14151 */ 14152 state->dts_alive = INT64_MAX; 14153 dtrace_membar_producer(); 14154 state->dts_alive = now; 14155 } 14156 #else /* !illumos */ 14157 static void 14158 dtrace_state_clean(void *arg) 14159 { 14160 dtrace_state_t *state = arg; 14161 dtrace_optval_t *opt = state->dts_options; 14162 14163 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 14164 return; 14165 14166 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 14167 dtrace_speculation_clean(state); 14168 14169 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14170 dtrace_state_clean, state); 14171 } 14172 14173 static void 14174 dtrace_state_deadman(void *arg) 14175 { 14176 dtrace_state_t *state = arg; 14177 hrtime_t now; 14178 14179 dtrace_sync(); 14180 14181 dtrace_debug_output(); 14182 14183 now = dtrace_gethrtime(); 14184 14185 if (state != dtrace_anon.dta_state && 14186 now - state->dts_laststatus >= dtrace_deadman_user) 14187 return; 14188 14189 /* 14190 * We must be sure that dts_alive never appears to be less than the 14191 * value upon entry to dtrace_state_deadman(), and because we lack a 14192 * dtrace_cas64(), we cannot store to it atomically. We thus instead 14193 * store INT64_MAX to it, followed by a memory barrier, followed by 14194 * the new value. This assures that dts_alive never appears to be 14195 * less than its true value, regardless of the order in which the 14196 * stores to the underlying storage are issued. 14197 */ 14198 state->dts_alive = INT64_MAX; 14199 dtrace_membar_producer(); 14200 state->dts_alive = now; 14201 14202 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14203 dtrace_state_deadman, state); 14204 } 14205 #endif /* illumos */ 14206 14207 static dtrace_state_t * 14208 #ifdef illumos 14209 dtrace_state_create(dev_t *devp, cred_t *cr) 14210 #else 14211 dtrace_state_create(struct cdev *dev) 14212 #endif 14213 { 14214 #ifdef illumos 14215 minor_t minor; 14216 major_t major; 14217 #else 14218 cred_t *cr = NULL; 14219 int m = 0; 14220 #endif 14221 char c[30]; 14222 dtrace_state_t *state; 14223 dtrace_optval_t *opt; 14224 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 14225 14226 ASSERT(MUTEX_HELD(&dtrace_lock)); 14227 ASSERT(MUTEX_HELD(&cpu_lock)); 14228 14229 #ifdef illumos 14230 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 14231 VM_BESTFIT | VM_SLEEP); 14232 14233 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 14234 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14235 return (NULL); 14236 } 14237 14238 state = ddi_get_soft_state(dtrace_softstate, minor); 14239 #else 14240 if (dev != NULL) { 14241 cr = dev->si_cred; 14242 m = dev2unit(dev); 14243 } 14244 14245 /* Allocate memory for the state. 
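	 * Unlike illumos, where the state lives in the soft-state array
	 * indexed by minor number, on FreeBSD it is allocated directly
	 * here.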
*/ 14246 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 14247 #endif 14248 14249 state->dts_epid = DTRACE_EPIDNONE + 1; 14250 14251 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 14252 #ifdef illumos 14253 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 14254 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14255 14256 if (devp != NULL) { 14257 major = getemajor(*devp); 14258 } else { 14259 major = ddi_driver_major(dtrace_devi); 14260 } 14261 14262 state->dts_dev = makedevice(major, minor); 14263 14264 if (devp != NULL) 14265 *devp = state->dts_dev; 14266 #else 14267 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 14268 state->dts_dev = dev; 14269 #endif 14270 14271 /* 14272 * We allocate NCPU buffers. On the one hand, this can be quite 14273 * a bit of memory per instance (nearly 36K on a Starcat). On the 14274 * other hand, it saves an additional memory reference in the probe 14275 * path. 14276 */ 14277 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 14278 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 14279 14280 #ifdef illumos 14281 state->dts_cleaner = CYCLIC_NONE; 14282 state->dts_deadman = CYCLIC_NONE; 14283 #else 14284 callout_init(&state->dts_cleaner, 1); 14285 callout_init(&state->dts_deadman, 1); 14286 #endif 14287 state->dts_vstate.dtvs_state = state; 14288 14289 for (i = 0; i < DTRACEOPT_MAX; i++) 14290 state->dts_options[i] = DTRACEOPT_UNSET; 14291 14292 /* 14293 * Set the default options. 14294 */ 14295 opt = state->dts_options; 14296 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 14297 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 14298 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 14299 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 14300 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 14301 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 14302 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 14303 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 14304 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 14305 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 14306 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 14307 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 14308 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 14309 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 14310 14311 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 14312 14313 /* 14314 * Depending on the user credentials, we set flag bits which alter probe 14315 * visibility or the amount of destructiveness allowed. In the case of 14316 * actual anonymous tracing, or the possession of all privileges, all of 14317 * the normal checks are bypassed. 14318 */ 14319 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 14320 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 14321 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 14322 } else { 14323 /* 14324 * Set up the credentials for this instantiation. We take a 14325 * hold on the credential to prevent it from disappearing on 14326 * us; this in turn prevents the zone_t referenced by this 14327 * credential from disappearing. This means that we can 14328 * examine the credential and the zone from probe context. 14329 */ 14330 crhold(cr); 14331 state->dts_cred.dcr_cred = cr; 14332 14333 /* 14334 * CRA_PROC means "we have *some* privilege for dtrace" and 14335 * unlocks the use of variables like pid, zonename, etc. 
14336	 */
14337	if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
14338	    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14339		state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
14340	}
14341
14342	/*
14343	 * dtrace_user allows use of syscall and profile providers.
14344	 * If the user also has proc_owner and/or proc_zone, we
14345	 * extend the scope to include additional visibility and
14346	 * destructive power.
14347	 */
14348	if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
14349		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
14350			state->dts_cred.dcr_visible |=
14351			    DTRACE_CRV_ALLPROC;
14352
14353			state->dts_cred.dcr_action |=
14354			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14355		}
14356
14357		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
14358			state->dts_cred.dcr_visible |=
14359			    DTRACE_CRV_ALLZONE;
14360
14361			state->dts_cred.dcr_action |=
14362			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14363		}
14364
14365		/*
14366		 * If we have all privs in whatever zone this is,
14367		 * we can do destructive things to processes which
14368		 * have altered credentials.
14369		 */
14370 #ifdef illumos
14371		if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14372		    cr->cr_zone->zone_privset)) {
14373			state->dts_cred.dcr_action |=
14374			    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14375		}
14376 #endif
14377	}
14378
14379	/*
14380	 * Holding the dtrace_kernel privilege also implies that
14381	 * the user has the dtrace_user privilege from a visibility
14382	 * perspective.  But without further privileges, some
14383	 * destructive actions are not available.
14384	 */
14385	if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
14386		/*
14387		 * Make all probes in all zones visible.  However,
14388		 * this doesn't mean that all actions become available
14389		 * to all zones.
14390		 */
14391		state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
14392		    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
14393
14394		state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
14395		    DTRACE_CRA_PROC;
14396		/*
14397		 * Holding proc_owner means that destructive actions
14398		 * for *this* zone are allowed.
14399		 */
14400		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14401			state->dts_cred.dcr_action |=
14402			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14403
14404		/*
14405		 * Holding proc_zone means that destructive actions
14406		 * for this user/group ID in all zones are allowed.
14407		 */
14408		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14409			state->dts_cred.dcr_action |=
14410			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14411
14412 #ifdef illumos
14413		/*
14414		 * If we have all privs in whatever zone this is,
14415		 * we can do destructive things to processes which
14416		 * have altered credentials.
14417		 */
14418		if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14419		    cr->cr_zone->zone_privset)) {
14420			state->dts_cred.dcr_action |=
14421			    DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14422		}
14423 #endif
14424	}
14425
14426	/*
14427	 * Holding the dtrace_proc privilege gives control over fasttrap
14428	 * and pid providers.  We need to grant wider destructive
14429	 * privileges in the event that the user has proc_owner and/or
14430	 * proc_zone.
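	 *
	 * In sketch form, the net effect of this cascade (above and below)
	 * is roughly:
	 *
	 *	dtrace_user or dtrace_proc     -> DTRACE_CRA_PROC
	 *	dtrace_user + proc_owner       -> + DTRACE_CRV_ALLPROC,
	 *	                                    ..._DESTRUCTIVE_ALLUSER
	 *	dtrace_user + proc_zone        -> + DTRACE_CRV_ALLZONE,
	 *	                                    ..._DESTRUCTIVE_ALLZONE
	 *	dtrace_kernel                  -> + DTRACE_CRV_KERNEL (and all
	 *	                                    of the visibility above)
	 *	dtrace_proc + proc_owner/zone  -> the corresponding destructive
	 *	                                    rights, with no added
	 *	                                    visibility
	 *
	 * This is a summary of the code's effect here, not a normative
	 * statement of the privilege model.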
14431	 */
14432	if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14433		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14434			state->dts_cred.dcr_action |=
14435			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14436
14437		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14438			state->dts_cred.dcr_action |=
14439			    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14440		}
14441	}
14442
14443	return (state);
14444 }
14445
14446 static int
14447 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
14448 {
14449	dtrace_optval_t *opt = state->dts_options, size;
14450	processorid_t cpu = 0;
14451	int flags = 0, rval, factor, divisor = 1;
14452
14453	ASSERT(MUTEX_HELD(&dtrace_lock));
14454	ASSERT(MUTEX_HELD(&cpu_lock));
14455	ASSERT(which < DTRACEOPT_MAX);
14456	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
14457	    (state == dtrace_anon.dta_state &&
14458	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
14459
14460	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
14461		return (0);
14462
14463	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
14464		cpu = opt[DTRACEOPT_CPU];
14465
14466	if (which == DTRACEOPT_SPECSIZE)
14467		flags |= DTRACEBUF_NOSWITCH;
14468
14469	if (which == DTRACEOPT_BUFSIZE) {
14470		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
14471			flags |= DTRACEBUF_RING;
14472
14473		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
14474			flags |= DTRACEBUF_FILL;
14475
14476		if (state != dtrace_anon.dta_state ||
14477		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14478			flags |= DTRACEBUF_INACTIVE;
14479	}
14480
14481	for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
14482		/*
14483		 * The size must be 8-byte aligned.  If the size is not 8-byte
14484		 * aligned, drop it down by the difference.
14485		 */
14486		if (size & (sizeof (uint64_t) - 1))
14487			size -= size & (sizeof (uint64_t) - 1);
14488
14489		if (size < state->dts_reserve) {
14490			/*
14491			 * Buffers must always be large enough to accommodate
14492			 * their prereserved space.  We return E2BIG instead
14493			 * of ENOMEM in this case to allow for user-level
14494			 * software to differentiate the cases.
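			 *
			 * (More generally, the sizing loop retries on ENOMEM:
			 * dtrace_buffer_alloc() reports a factor, and the
			 * divisor below is rounded up to the next power of
			 * two -- e.g. a reported factor of 5 yields a divisor
			 * of 8 -- so each pass tries a correspondingly
			 * smaller, re-aligned size until it drops below
			 * sizeof (uint64_t).)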
14495			 */
14496			return (E2BIG);
14497		}
14498
14499		rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
14500
14501		if (rval != ENOMEM) {
14502			opt[which] = size;
14503			return (rval);
14504		}
14505
14506		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14507			return (rval);
14508
14509		for (divisor = 2; divisor < factor; divisor <<= 1)
14510			continue;
14511	}
14512
14513	return (ENOMEM);
14514 }
14515
14516 static int
14517 dtrace_state_buffers(dtrace_state_t *state)
14518 {
14519	dtrace_speculation_t *spec = state->dts_speculations;
14520	int rval, i;
14521
14522	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
14523	    DTRACEOPT_BUFSIZE)) != 0)
14524		return (rval);
14525
14526	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
14527	    DTRACEOPT_AGGSIZE)) != 0)
14528		return (rval);
14529
14530	for (i = 0; i < state->dts_nspeculations; i++) {
14531		if ((rval = dtrace_state_buffer(state,
14532		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
14533			return (rval);
14534	}
14535
14536	return (0);
14537 }
14538
14539 static void
14540 dtrace_state_prereserve(dtrace_state_t *state)
14541 {
14542	dtrace_ecb_t *ecb;
14543	dtrace_probe_t *probe;
14544
14545	state->dts_reserve = 0;
14546
14547	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
14548		return;
14549
14550	/*
14551	 * If our buffer policy is a "fill" buffer policy, we need to set the
14552	 * prereserved space to be the space required by the END probes.
14553	 */
14554	probe = dtrace_probes[dtrace_probeid_end - 1];
14555	ASSERT(probe != NULL);
14556
14557	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14558		if (ecb->dte_state != state)
14559			continue;
14560
14561		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14562	}
14563 }
14564
14565 static int
14566 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14567 {
14568	dtrace_optval_t *opt = state->dts_options, sz, nspec;
14569	dtrace_speculation_t *spec;
14570	dtrace_buffer_t *buf;
14571 #ifdef illumos
14572	cyc_handler_t hdlr;
14573	cyc_time_t when;
14574 #endif
14575	int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14576	dtrace_icookie_t cookie;
14577
14578	mutex_enter(&cpu_lock);
14579	mutex_enter(&dtrace_lock);
14580
14581	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14582		rval = EBUSY;
14583		goto out;
14584	}
14585
14586	/*
14587	 * Before we can perform any checks, we must prime all of the
14588	 * retained enablings that correspond to this state.
14589	 */
14590	dtrace_enabling_prime(state);
14591
14592	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14593		rval = EACCES;
14594		goto out;
14595	}
14596
14597	dtrace_state_prereserve(state);
14598
14599	/*
14600	 * What we want to do now is try to allocate our speculations.
14601	 * We do not automatically resize the number of speculations; if
14602	 * this fails, we will fail the operation.
14603 */ 14604 nspec = opt[DTRACEOPT_NSPEC]; 14605 ASSERT(nspec != DTRACEOPT_UNSET); 14606 14607 if (nspec > INT_MAX) { 14608 rval = ENOMEM; 14609 goto out; 14610 } 14611 14612 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), 14613 KM_NOSLEEP | KM_NORMALPRI); 14614 14615 if (spec == NULL) { 14616 rval = ENOMEM; 14617 goto out; 14618 } 14619 14620 state->dts_speculations = spec; 14621 state->dts_nspeculations = (int)nspec; 14622 14623 for (i = 0; i < nspec; i++) { 14624 if ((buf = kmem_zalloc(bufsize, 14625 KM_NOSLEEP | KM_NORMALPRI)) == NULL) { 14626 rval = ENOMEM; 14627 goto err; 14628 } 14629 14630 spec[i].dtsp_buffer = buf; 14631 } 14632 14633 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 14634 if (dtrace_anon.dta_state == NULL) { 14635 rval = ENOENT; 14636 goto out; 14637 } 14638 14639 if (state->dts_necbs != 0) { 14640 rval = EALREADY; 14641 goto out; 14642 } 14643 14644 state->dts_anon = dtrace_anon_grab(); 14645 ASSERT(state->dts_anon != NULL); 14646 state = state->dts_anon; 14647 14648 /* 14649 * We want "grabanon" to be set in the grabbed state, so we'll 14650 * copy that option value from the grabbing state into the 14651 * grabbed state. 14652 */ 14653 state->dts_options[DTRACEOPT_GRABANON] = 14654 opt[DTRACEOPT_GRABANON]; 14655 14656 *cpu = dtrace_anon.dta_beganon; 14657 14658 /* 14659 * If the anonymous state is active (as it almost certainly 14660 * is if the anonymous enabling ultimately matched anything), 14661 * we don't allow any further option processing -- but we 14662 * don't return failure. 14663 */ 14664 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 14665 goto out; 14666 } 14667 14668 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 14669 opt[DTRACEOPT_AGGSIZE] != 0) { 14670 if (state->dts_aggregations == NULL) { 14671 /* 14672 * We're not going to create an aggregation buffer 14673 * because we don't have any ECBs that contain 14674 * aggregations -- set this option to 0. 14675 */ 14676 opt[DTRACEOPT_AGGSIZE] = 0; 14677 } else { 14678 /* 14679 * If we have an aggregation buffer, we must also have 14680 * a buffer to use as scratch. 14681 */ 14682 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 14683 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 14684 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 14685 } 14686 } 14687 } 14688 14689 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 14690 opt[DTRACEOPT_SPECSIZE] != 0) { 14691 if (!state->dts_speculates) { 14692 /* 14693 * We're not going to create speculation buffers 14694 * because we don't have any ECBs that actually 14695 * speculate -- set the speculation size to 0. 14696 */ 14697 opt[DTRACEOPT_SPECSIZE] = 0; 14698 } 14699 } 14700 14701 /* 14702 * The bare minimum size for any buffer that we're actually going to 14703 * do anything to is sizeof (uint64_t). 14704 */ 14705 sz = sizeof (uint64_t); 14706 14707 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 14708 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 14709 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 14710 /* 14711 * A buffer size has been explicitly set to 0 (or to a size 14712 * that will be adjusted to 0) and we need the space -- we 14713 * need to return failure. We return ENOSPC to differentiate 14714 * it from failing to allocate a buffer due to failure to meet 14715 * the reserve (for which we return E2BIG). 
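		 *
		 * For reference, the buffer-sizing failures in this function
		 * break down as follows:
		 *
		 *	E2BIG	size cannot shrink below the prereserved space
		 *	ENOMEM	allocation failed even at the smallest
		 *		viable size
		 *	ENOSPC	a buffer that we need has an effective size
		 *		of zero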
14716 */ 14717 rval = ENOSPC; 14718 goto out; 14719 } 14720 14721 if ((rval = dtrace_state_buffers(state)) != 0) 14722 goto err; 14723 14724 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 14725 sz = dtrace_dstate_defsize; 14726 14727 do { 14728 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 14729 14730 if (rval == 0) 14731 break; 14732 14733 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 14734 goto err; 14735 } while (sz >>= 1); 14736 14737 opt[DTRACEOPT_DYNVARSIZE] = sz; 14738 14739 if (rval != 0) 14740 goto err; 14741 14742 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 14743 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 14744 14745 if (opt[DTRACEOPT_CLEANRATE] == 0) 14746 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14747 14748 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 14749 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 14750 14751 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 14752 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 14753 14754 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 14755 #ifdef illumos 14756 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 14757 hdlr.cyh_arg = state; 14758 hdlr.cyh_level = CY_LOW_LEVEL; 14759 14760 when.cyt_when = 0; 14761 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 14762 14763 state->dts_cleaner = cyclic_add(&hdlr, &when); 14764 14765 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 14766 hdlr.cyh_arg = state; 14767 hdlr.cyh_level = CY_LOW_LEVEL; 14768 14769 when.cyt_when = 0; 14770 when.cyt_interval = dtrace_deadman_interval; 14771 14772 state->dts_deadman = cyclic_add(&hdlr, &when); 14773 #else 14774 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 14775 dtrace_state_clean, state); 14776 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 14777 dtrace_state_deadman, state); 14778 #endif 14779 14780 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 14781 14782 #ifdef illumos 14783 if (state->dts_getf != 0 && 14784 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { 14785 /* 14786 * We don't have kernel privs but we have at least one call 14787 * to getf(); we need to bump our zone's count, and (if 14788 * this is the first enabling to have an unprivileged call 14789 * to getf()) we need to hook into closef(). 14790 */ 14791 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; 14792 14793 if (dtrace_getf++ == 0) { 14794 ASSERT(dtrace_closef == NULL); 14795 dtrace_closef = dtrace_getf_barrier; 14796 } 14797 } 14798 #endif 14799 14800 /* 14801 * Now it's time to actually fire the BEGIN probe. We need to disable 14802 * interrupts here both to record the CPU on which we fired the BEGIN 14803 * probe (the data from this CPU will be processed first at user 14804 * level) and to manually activate the buffer for this CPU. 14805 */ 14806 cookie = dtrace_interrupt_disable(); 14807 *cpu = curcpu; 14808 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 14809 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 14810 14811 dtrace_probe(dtrace_probeid_begin, 14812 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 14813 dtrace_interrupt_enable(cookie); 14814 /* 14815 * We may have had an exit action from a BEGIN probe; only change our 14816 * state to ACTIVE if we're still in WARMUP. 
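	 *
	 * (This is precisely what happens for a consumer along the lines of
	 * the canonical
	 *
	 *	dtrace -n 'BEGIN { exit(0); }'
	 *
	 * in which the exit() action moves the state to DRAINING before the
	 * BEGIN probe even returns.)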
14817	 */
14818	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14819	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14820
14821	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14822		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14823
14824	/*
14825	 * Regardless of whether we're now in ACTIVE or DRAINING, we
14826	 * want each CPU to transition its principal buffer out of the
14827	 * INACTIVE state.  Doing this assures that no CPU will suddenly begin
14828	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14829	 * atomically transition from processing none of a state's ECBs to
14830	 * processing all of them.
14831	 */
14832	dtrace_xcall(DTRACE_CPUALL,
14833	    (dtrace_xcall_t)dtrace_buffer_activate, state);
14834	goto out;
14835
14836 err:
14837	dtrace_buffer_free(state->dts_buffer);
14838	dtrace_buffer_free(state->dts_aggbuffer);
14839
14840	if ((nspec = state->dts_nspeculations) == 0) {
14841		ASSERT(state->dts_speculations == NULL);
14842		goto out;
14843	}
14844
14845	spec = state->dts_speculations;
14846	ASSERT(spec != NULL);
14847
14848	for (i = 0; i < state->dts_nspeculations; i++) {
14849		if ((buf = spec[i].dtsp_buffer) == NULL)
14850			break;
14851
14852		dtrace_buffer_free(buf);
14853		kmem_free(buf, bufsize);
14854	}
14855
14856	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14857	state->dts_nspeculations = 0;
14858	state->dts_speculations = NULL;
14859
14860 out:
14861	mutex_exit(&dtrace_lock);
14862	mutex_exit(&cpu_lock);
14863
14864	return (rval);
14865 }
14866
14867 static int
14868 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14869 {
14870	dtrace_icookie_t cookie;
14871
14872	ASSERT(MUTEX_HELD(&dtrace_lock));
14873
14874	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14875	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14876		return (EINVAL);
14877
14878	/*
14879	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14880	 * to be sure that every CPU has seen it.  See below for the details
14881	 * on why this is done.
14882	 */
14883	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14884	dtrace_sync();
14885
14886	/*
14887	 * By this point, it is impossible for any CPU to be still processing
14888	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
14889	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14890	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
14891	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14892	 * iff we're in the END probe.
14893	 */
14894	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14895	dtrace_sync();
14896	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14897
14898	/*
14899	 * Finally, we can release the reserve and call the END probe.  We
14900	 * disable interrupts across calling the END probe to allow us to
14901	 * return the CPU on which we actually called the END probe.  This
14902	 * allows user-land to be sure that this CPU's principal buffer is
14903	 * processed last.
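	 *
	 * Taken together with dtrace_state_go(), the activity progression
	 * for an orderly consumer is thus:
	 *
	 *	INACTIVE -> WARMUP -> ACTIVE -> DRAINING -> COOLDOWN -> STOPPED
	 *
	 * where DRAINING may be entered either here or via an exit() action,
	 * and KILLED is reserved for the disorderly-shutdown path in
	 * dtrace_state_destroy().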
14904	 */
14905	state->dts_reserve = 0;
14906
14907	cookie = dtrace_interrupt_disable();
14908	*cpu = curcpu;
14909	dtrace_probe(dtrace_probeid_end,
14910	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14911	dtrace_interrupt_enable(cookie);
14912
14913	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14914	dtrace_sync();
14915
14916 #ifdef illumos
14917	if (state->dts_getf != 0 &&
14918	    !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14919		/*
14920		 * We don't have kernel privs but we have at least one call
14921		 * to getf(); we need to lower our zone's count, and (if
14922		 * this is the last enabling to have an unprivileged call
14923		 * to getf()) we need to clear the closef() hook.
14924		 */
14925		ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14926		ASSERT(dtrace_closef == dtrace_getf_barrier);
14927		ASSERT(dtrace_getf > 0);
14928
14929		state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14930
14931		if (--dtrace_getf == 0)
14932			dtrace_closef = NULL;
14933	}
14934 #endif
14935
14936	return (0);
14937 }
14938
14939 static int
14940 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14941     dtrace_optval_t val)
14942 {
14943	ASSERT(MUTEX_HELD(&dtrace_lock));
14944
14945	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14946		return (EBUSY);
14947
14948	if (option >= DTRACEOPT_MAX)
14949		return (EINVAL);
14950
14951	if (option != DTRACEOPT_CPU && val < 0)
14952		return (EINVAL);
14953
14954	switch (option) {
14955	case DTRACEOPT_DESTRUCTIVE:
14956		if (dtrace_destructive_disallow)
14957			return (EACCES);
14958
14959		state->dts_cred.dcr_destructive = 1;
14960		break;
14961
14962	case DTRACEOPT_BUFSIZE:
14963	case DTRACEOPT_DYNVARSIZE:
14964	case DTRACEOPT_AGGSIZE:
14965	case DTRACEOPT_SPECSIZE:
14966	case DTRACEOPT_STRSIZE:
14967		if (val < 0)
14968			return (EINVAL);
14969
14970		if (val >= LONG_MAX) {
14971			/*
14972			 * If this is an otherwise negative value, set it to
14973			 * the highest multiple of 128m less than LONG_MAX.
14974			 * Technically, we're adjusting the size without
14975			 * regard to the buffer resizing policy, but in fact,
14976			 * this has no effect -- if we set the buffer size to
14977			 * ~LONG_MAX and the buffer policy is ultimately set to
14978			 * be "manual", the buffer allocation is guaranteed to
14979			 * fail, if only because the allocation requires two
14980			 * buffers.  (We set the size to the highest
14981			 * multiple of 128m because it ensures that the size
14982			 * will remain a multiple of a megabyte when
14983			 * repeatedly halved -- all the way down to 15m.)
14984			 */
14985			val = LONG_MAX - (1 << 27) + 1;
14986		}
14987	}
14988
14989	state->dts_options[option] = val;
14990
14991	return (0);
14992 }
14993
14994 static void
14995 dtrace_state_destroy(dtrace_state_t *state)
14996 {
14997	dtrace_ecb_t *ecb;
14998	dtrace_vstate_t *vstate = &state->dts_vstate;
14999 #ifdef illumos
15000	minor_t minor = getminor(state->dts_dev);
15001 #endif
15002	int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
15003	dtrace_speculation_t *spec = state->dts_speculations;
15004	int nspec = state->dts_nspeculations;
15005	uint32_t match;
15006
15007	ASSERT(MUTEX_HELD(&dtrace_lock));
15008	ASSERT(MUTEX_HELD(&cpu_lock));
15009
15010	/*
15011	 * First, retract any retained enablings for this state.
15012 */ 15013 dtrace_enabling_retract(state); 15014 ASSERT(state->dts_nretained == 0); 15015 15016 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 15017 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 15018 /* 15019 * We have managed to come into dtrace_state_destroy() on a 15020 * hot enabling -- almost certainly because of a disorderly 15021 * shutdown of a consumer. (That is, a consumer that is 15022 * exiting without having called dtrace_stop().) In this case, 15023 * we're going to set our activity to be KILLED, and then 15024 * issue a sync to be sure that everyone is out of probe 15025 * context before we start blowing away ECBs. 15026 */ 15027 state->dts_activity = DTRACE_ACTIVITY_KILLED; 15028 dtrace_sync(); 15029 } 15030 15031 /* 15032 * Release the credential hold we took in dtrace_state_create(). 15033 */ 15034 if (state->dts_cred.dcr_cred != NULL) 15035 crfree(state->dts_cred.dcr_cred); 15036 15037 /* 15038 * Now we can safely disable and destroy any enabled probes. Because 15039 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 15040 * (especially if they're all enabled), we take two passes through the 15041 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 15042 * in the second we disable whatever is left over. 15043 */ 15044 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 15045 for (i = 0; i < state->dts_necbs; i++) { 15046 if ((ecb = state->dts_ecbs[i]) == NULL) 15047 continue; 15048 15049 if (match && ecb->dte_probe != NULL) { 15050 dtrace_probe_t *probe = ecb->dte_probe; 15051 dtrace_provider_t *prov = probe->dtpr_provider; 15052 15053 if (!(prov->dtpv_priv.dtpp_flags & match)) 15054 continue; 15055 } 15056 15057 dtrace_ecb_disable(ecb); 15058 dtrace_ecb_destroy(ecb); 15059 } 15060 15061 if (!match) 15062 break; 15063 } 15064 15065 /* 15066 * Before we free the buffers, perform one more sync to assure that 15067 * every CPU is out of probe context. 
15068 */ 15069 dtrace_sync(); 15070 15071 dtrace_buffer_free(state->dts_buffer); 15072 dtrace_buffer_free(state->dts_aggbuffer); 15073 15074 for (i = 0; i < nspec; i++) 15075 dtrace_buffer_free(spec[i].dtsp_buffer); 15076 15077 #ifdef illumos 15078 if (state->dts_cleaner != CYCLIC_NONE) 15079 cyclic_remove(state->dts_cleaner); 15080 15081 if (state->dts_deadman != CYCLIC_NONE) 15082 cyclic_remove(state->dts_deadman); 15083 #else 15084 callout_stop(&state->dts_cleaner); 15085 callout_drain(&state->dts_cleaner); 15086 callout_stop(&state->dts_deadman); 15087 callout_drain(&state->dts_deadman); 15088 #endif 15089 15090 dtrace_dstate_fini(&vstate->dtvs_dynvars); 15091 dtrace_vstate_fini(vstate); 15092 if (state->dts_ecbs != NULL) 15093 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 15094 15095 if (state->dts_aggregations != NULL) { 15096 #ifdef DEBUG 15097 for (i = 0; i < state->dts_naggregations; i++) 15098 ASSERT(state->dts_aggregations[i] == NULL); 15099 #endif 15100 ASSERT(state->dts_naggregations > 0); 15101 kmem_free(state->dts_aggregations, 15102 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 15103 } 15104 15105 kmem_free(state->dts_buffer, bufsize); 15106 kmem_free(state->dts_aggbuffer, bufsize); 15107 15108 for (i = 0; i < nspec; i++) 15109 kmem_free(spec[i].dtsp_buffer, bufsize); 15110 15111 if (spec != NULL) 15112 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 15113 15114 dtrace_format_destroy(state); 15115 15116 if (state->dts_aggid_arena != NULL) { 15117 #ifdef illumos 15118 vmem_destroy(state->dts_aggid_arena); 15119 #else 15120 delete_unrhdr(state->dts_aggid_arena); 15121 #endif 15122 state->dts_aggid_arena = NULL; 15123 } 15124 #ifdef illumos 15125 ddi_soft_state_free(dtrace_softstate, minor); 15126 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 15127 #endif 15128 } 15129 15130 /* 15131 * DTrace Anonymous Enabling Functions 15132 */ 15133 static dtrace_state_t * 15134 dtrace_anon_grab(void) 15135 { 15136 dtrace_state_t *state; 15137 15138 ASSERT(MUTEX_HELD(&dtrace_lock)); 15139 15140 if ((state = dtrace_anon.dta_state) == NULL) { 15141 ASSERT(dtrace_anon.dta_enabling == NULL); 15142 return (NULL); 15143 } 15144 15145 ASSERT(dtrace_anon.dta_enabling != NULL); 15146 ASSERT(dtrace_retained != NULL); 15147 15148 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 15149 dtrace_anon.dta_enabling = NULL; 15150 dtrace_anon.dta_state = NULL; 15151 15152 return (state); 15153 } 15154 15155 static void 15156 dtrace_anon_property(void) 15157 { 15158 int i, rv; 15159 dtrace_state_t *state; 15160 dof_hdr_t *dof; 15161 char c[32]; /* enough for "dof-data-" + digits */ 15162 15163 ASSERT(MUTEX_HELD(&dtrace_lock)); 15164 ASSERT(MUTEX_HELD(&cpu_lock)); 15165 15166 for (i = 0; ; i++) { 15167 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 15168 15169 dtrace_err_verbose = 1; 15170 15171 if ((dof = dtrace_dof_property(c)) == NULL) { 15172 dtrace_err_verbose = 0; 15173 break; 15174 } 15175 15176 #ifdef illumos 15177 /* 15178 * We want to create anonymous state, so we need to transition 15179 * the kernel debugger to indicate that DTrace is active. If 15180 * this fails (e.g. because the debugger has modified text in 15181 * some way), we won't continue with the processing. 
15182 */ 15183 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15184 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 15185 "enabling ignored."); 15186 dtrace_dof_destroy(dof); 15187 break; 15188 } 15189 #endif 15190 15191 /* 15192 * If we haven't allocated an anonymous state, we'll do so now. 15193 */ 15194 if ((state = dtrace_anon.dta_state) == NULL) { 15195 #ifdef illumos 15196 state = dtrace_state_create(NULL, NULL); 15197 #else 15198 state = dtrace_state_create(NULL); 15199 #endif 15200 dtrace_anon.dta_state = state; 15201 15202 if (state == NULL) { 15203 /* 15204 * This basically shouldn't happen: the only 15205 * failure mode from dtrace_state_create() is a 15206 * failure of ddi_soft_state_zalloc() that 15207 * itself should never happen. Still, the 15208 * interface allows for a failure mode, and 15209 * we want to fail as gracefully as possible: 15210 * we'll emit an error message and cease 15211 * processing anonymous state in this case. 15212 */ 15213 cmn_err(CE_WARN, "failed to create " 15214 "anonymous state"); 15215 dtrace_dof_destroy(dof); 15216 break; 15217 } 15218 } 15219 15220 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 15221 &dtrace_anon.dta_enabling, 0, B_TRUE); 15222 15223 if (rv == 0) 15224 rv = dtrace_dof_options(dof, state); 15225 15226 dtrace_err_verbose = 0; 15227 dtrace_dof_destroy(dof); 15228 15229 if (rv != 0) { 15230 /* 15231 * This is malformed DOF; chuck any anonymous state 15232 * that we created. 15233 */ 15234 ASSERT(dtrace_anon.dta_enabling == NULL); 15235 dtrace_state_destroy(state); 15236 dtrace_anon.dta_state = NULL; 15237 break; 15238 } 15239 15240 ASSERT(dtrace_anon.dta_enabling != NULL); 15241 } 15242 15243 if (dtrace_anon.dta_enabling != NULL) { 15244 int rval; 15245 15246 /* 15247 * dtrace_enabling_retain() can only fail because we are 15248 * trying to retain more enablings than are allowed -- but 15249 * we only have one anonymous enabling, and we are guaranteed 15250 * to be allowed at least one retained enabling; we assert 15251 * that dtrace_enabling_retain() returns success. 15252 */ 15253 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 15254 ASSERT(rval == 0); 15255 15256 dtrace_enabling_dump(dtrace_anon.dta_enabling); 15257 } 15258 } 15259 15260 /* 15261 * DTrace Helper Functions 15262 */ 15263 static void 15264 dtrace_helper_trace(dtrace_helper_action_t *helper, 15265 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 15266 { 15267 uint32_t size, next, nnext, i; 15268 dtrace_helptrace_t *ent, *buffer; 15269 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 15270 15271 if ((buffer = dtrace_helptrace_buffer) == NULL) 15272 return; 15273 15274 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 15275 15276 /* 15277 * What would a tracing framework be without its own tracing 15278 * framework? (Well, a hell of a lot simpler, for starters...) 15279 */ 15280 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 15281 sizeof (uint64_t) - sizeof (uint64_t); 15282 15283 /* 15284 * Iterate until we can allocate a slot in the trace buffer. 15285 */ 15286 do { 15287 next = dtrace_helptrace_next; 15288 15289 if (next + size < dtrace_helptrace_bufsize) { 15290 nnext = next + size; 15291 } else { 15292 nnext = size; 15293 } 15294 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 15295 15296 /* 15297 * We have our slot; fill it in. 
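	 *
	 * (To make the wrap arithmetic above concrete: with a 1024-byte
	 * buffer and 100-byte entries, dtrace_helptrace_next advances
	 * 0, 100, ..., 1000; at next == 1000 a new entry would end at
	 * 1100, which does not fit, so nnext becomes 100 and the entry
	 * occupies offset 0 -- "nnext == size" below is how the wrap is
	 * detected, and the short tail of the buffer is simply left
	 * unused.)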
15298 */ 15299 if (nnext == size) { 15300 dtrace_helptrace_wrapped++; 15301 next = 0; 15302 } 15303 15304 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); 15305 ent->dtht_helper = helper; 15306 ent->dtht_where = where; 15307 ent->dtht_nlocals = vstate->dtvs_nlocals; 15308 15309 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 15310 mstate->dtms_fltoffs : -1; 15311 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 15312 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 15313 15314 for (i = 0; i < vstate->dtvs_nlocals; i++) { 15315 dtrace_statvar_t *svar; 15316 15317 if ((svar = vstate->dtvs_locals[i]) == NULL) 15318 continue; 15319 15320 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 15321 ent->dtht_locals[i] = 15322 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 15323 } 15324 } 15325 15326 static uint64_t 15327 dtrace_helper(int which, dtrace_mstate_t *mstate, 15328 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 15329 { 15330 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 15331 uint64_t sarg0 = mstate->dtms_arg[0]; 15332 uint64_t sarg1 = mstate->dtms_arg[1]; 15333 uint64_t rval = 0; 15334 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 15335 dtrace_helper_action_t *helper; 15336 dtrace_vstate_t *vstate; 15337 dtrace_difo_t *pred; 15338 int i, trace = dtrace_helptrace_buffer != NULL; 15339 15340 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 15341 15342 if (helpers == NULL) 15343 return (0); 15344 15345 if ((helper = helpers->dthps_actions[which]) == NULL) 15346 return (0); 15347 15348 vstate = &helpers->dthps_vstate; 15349 mstate->dtms_arg[0] = arg0; 15350 mstate->dtms_arg[1] = arg1; 15351 15352 /* 15353 * Now iterate over each helper. If its predicate evaluates to 'true', 15354 * we'll call the corresponding actions. Note that the below calls 15355 * to dtrace_dif_emulate() may set faults in machine state. This is 15356 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 15357 * the stored DIF offset with its own (which is the desired behavior). 15358 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 15359 * from machine state; this is okay, too. 15360 */ 15361 for (; helper != NULL; helper = helper->dtha_next) { 15362 if ((pred = helper->dtha_predicate) != NULL) { 15363 if (trace) 15364 dtrace_helper_trace(helper, mstate, vstate, 0); 15365 15366 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 15367 goto next; 15368 15369 if (*flags & CPU_DTRACE_FAULT) 15370 goto err; 15371 } 15372 15373 for (i = 0; i < helper->dtha_nactions; i++) { 15374 if (trace) 15375 dtrace_helper_trace(helper, 15376 mstate, vstate, i + 1); 15377 15378 rval = dtrace_dif_emulate(helper->dtha_actions[i], 15379 mstate, vstate, state); 15380 15381 if (*flags & CPU_DTRACE_FAULT) 15382 goto err; 15383 } 15384 15385 next: 15386 if (trace) 15387 dtrace_helper_trace(helper, mstate, vstate, 15388 DTRACE_HELPTRACE_NEXT); 15389 } 15390 15391 if (trace) 15392 dtrace_helper_trace(helper, mstate, vstate, 15393 DTRACE_HELPTRACE_DONE); 15394 15395 /* 15396 * Restore the arg0 that we saved upon entry. 15397 */ 15398 mstate->dtms_arg[0] = sarg0; 15399 mstate->dtms_arg[1] = sarg1; 15400 15401 return (rval); 15402 15403 err: 15404 if (trace) 15405 dtrace_helper_trace(helper, mstate, vstate, 15406 DTRACE_HELPTRACE_ERR); 15407 15408 /* 15409 * Restore the arg0 that we saved upon entry. 
15410	 */
15411	mstate->dtms_arg[0] = sarg0;
15412	mstate->dtms_arg[1] = sarg1;
15413
15414	return (0);
15415 }
15416
15417 static void
15418 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15419     dtrace_vstate_t *vstate)
15420 {
15421	int i;
15422
15423	if (helper->dtha_predicate != NULL)
15424		dtrace_difo_release(helper->dtha_predicate, vstate);
15425
15426	for (i = 0; i < helper->dtha_nactions; i++) {
15427		ASSERT(helper->dtha_actions[i] != NULL);
15428		dtrace_difo_release(helper->dtha_actions[i], vstate);
15429	}
15430
15431	kmem_free(helper->dtha_actions,
15432	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
15433	kmem_free(helper, sizeof (dtrace_helper_action_t));
15434 }
15435
15436 static int
15437 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen)
15438 {
15439	proc_t *p = curproc;
15440	dtrace_vstate_t *vstate;
15441	int i;
15442
15443	if (help == NULL)
15444		help = p->p_dtrace_helpers;
15445
15446	ASSERT(MUTEX_HELD(&dtrace_lock));
15447
15448	if (help == NULL || gen > help->dthps_generation)
15449		return (EINVAL);
15450
15451	vstate = &help->dthps_vstate;
15452
15453	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15454		dtrace_helper_action_t *last = NULL, *h, *next;
15455
15456		for (h = help->dthps_actions[i]; h != NULL; h = next) {
15457			next = h->dtha_next;
15458
15459			if (h->dtha_generation == gen) {
15460				if (last != NULL) {
15461					last->dtha_next = next;
15462				} else {
15463					help->dthps_actions[i] = next;
15464				}
15465
15466				dtrace_helper_action_destroy(h, vstate);
15467			} else {
15468				last = h;
15469			}
15470		}
15471	}
15472
15473	/*
15474	 * Iterate until we've cleared out all helper providers with the
15475	 * given generation number.
15476	 */
15477	for (;;) {
15478		dtrace_helper_provider_t *prov;
15479
15480		/*
15481		 * Look for a helper provider with the right generation.  We
15482		 * have to start back at the beginning of the list each time
15483		 * because we drop dtrace_lock.  It's unlikely that we'll make
15484		 * more than two passes.
15485		 */
15486		for (i = 0; i < help->dthps_nprovs; i++) {
15487			prov = help->dthps_provs[i];
15488
15489			if (prov->dthp_generation == gen)
15490				break;
15491		}
15492
15493		/*
15494		 * If there were no matches, we're done.
15495		 */
15496		if (i == help->dthps_nprovs)
15497			break;
15498
15499		/*
15500		 * Move the last helper provider into this slot.
15501		 */
15502		help->dthps_nprovs--;
15503		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15504		help->dthps_provs[help->dthps_nprovs] = NULL;
15505
15506		mutex_exit(&dtrace_lock);
15507
15508		/*
15509		 * If we have a meta provider, remove this helper provider.
15510		 */
15511		mutex_enter(&dtrace_meta_lock);
15512		if (dtrace_meta_pid != NULL) {
15513			ASSERT(dtrace_deferred_pid == NULL);
15514			dtrace_helper_provider_remove(&prov->dthp_prov,
15515			    p->p_pid);
15516		}
15517		mutex_exit(&dtrace_meta_lock);
15518
15519		dtrace_helper_provider_destroy(prov);
15520
15521		mutex_enter(&dtrace_lock);
15522	}
15523
15524	return (0);
15525 }
15526
15527 static int
15528 dtrace_helper_validate(dtrace_helper_action_t *helper)
15529 {
15530	int err = 0, i;
15531	dtrace_difo_t *dp;
15532
15533	if ((dp = helper->dtha_predicate) != NULL)
15534		err += dtrace_difo_validate_helper(dp);
15535
15536	for (i = 0; i < helper->dtha_nactions; i++)
15537		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15538
15539	return (err == 0);
15540 }
15541
15542 static int
15543 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep,
15544     dtrace_helpers_t *help)
15545 {
15546	dtrace_helper_action_t *helper, *last;
15547	dtrace_actdesc_t *act;
15548	dtrace_vstate_t *vstate;
15549	dtrace_predicate_t *pred;
15550	int count = 0, nactions = 0, i;
15551
15552	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15553		return (EINVAL);
15554
15555	last = help->dthps_actions[which];
15556	vstate = &help->dthps_vstate;
15557
15558	for (count = 0; last != NULL; last = last->dtha_next) {
15559		count++;
15560		if (last->dtha_next == NULL)
15561			break;
15562	}
15563
15564	/*
15565	 * If we already have dtrace_helper_actions_max helper actions for this
15566	 * helper action type, we'll refuse to add a new one.
15567	 */
15568	if (count >= dtrace_helper_actions_max)
15569		return (ENOSPC);
15570
15571	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15572	helper->dtha_generation = help->dthps_generation;
15573
15574	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15575		ASSERT(pred->dtp_difo != NULL);
15576		dtrace_difo_hold(pred->dtp_difo);
15577		helper->dtha_predicate = pred->dtp_difo;
15578	}
15579
15580	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15581		if (act->dtad_kind != DTRACEACT_DIFEXPR)
15582			goto err;
15583
15584		if (act->dtad_difo == NULL)
15585			goto err;
15586
15587		nactions++;
15588	}
15589
15590	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15591	    (helper->dtha_nactions = nactions), KM_SLEEP);
15592
15593	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15594		dtrace_difo_hold(act->dtad_difo);
15595		helper->dtha_actions[i++] = act->dtad_difo;
15596	}
15597
15598	if (!dtrace_helper_validate(helper))
15599		goto err;
15600
15601	if (last == NULL) {
15602		help->dthps_actions[which] = helper;
15603	} else {
15604		last->dtha_next = helper;
15605	}
15606
15607	if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15608		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15609		dtrace_helptrace_next = 0;
15610	}
15611
15612	return (0);
15613 err:
15614	dtrace_helper_action_destroy(helper, vstate);
15615	return (EINVAL);
15616 }
15617
15618 static void
15619 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15620     dof_helper_t *dofhp)
15621 {
15622	ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
15623
15624	mutex_enter(&dtrace_meta_lock);
15625	mutex_enter(&dtrace_lock);
15626
15627	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15628		/*
15629		 * If the dtrace module is loaded but not attached, or if
15630		 * there isn't a meta provider registered to deal with
15631		 * these provider descriptions, we need to postpone creating
15632		 * the actual providers until later.
15633		 */
15634
15635		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15636		    dtrace_deferred_pid != help) {
15637			help->dthps_deferred = 1;
15638			help->dthps_pid = p->p_pid;
15639			help->dthps_next = dtrace_deferred_pid;
15640			help->dthps_prev = NULL;
15641			if (dtrace_deferred_pid != NULL)
15642				dtrace_deferred_pid->dthps_prev = help;
15643			dtrace_deferred_pid = help;
15644		}
15645
15646		mutex_exit(&dtrace_lock);
15647
15648	} else if (dofhp != NULL) {
15649		/*
15650		 * If the dtrace module is loaded and we have a particular
15651		 * helper provider description, pass that off to the
15652		 * meta provider.
15653		 */
15654
15655		mutex_exit(&dtrace_lock);
15656
15657		dtrace_helper_provide(dofhp, p->p_pid);
15658
15659	} else {
15660		/*
15661		 * Otherwise, just pass all the helper provider descriptions
15662		 * off to the meta provider.
15663		 */
15664
15665		int i;
15666		mutex_exit(&dtrace_lock);
15667
15668		for (i = 0; i < help->dthps_nprovs; i++) {
15669			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15670			    p->p_pid);
15671		}
15672	}
15673
15674	mutex_exit(&dtrace_meta_lock);
15675 }
15676
15677 static int
15678 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen)
15679 {
15680	dtrace_helper_provider_t *hprov, **tmp_provs;
15681	uint_t tmp_maxprovs, i;
15682
15683	ASSERT(MUTEX_HELD(&dtrace_lock));
15684	ASSERT(help != NULL);
15685
15686	/*
15687	 * If we already have dtrace_helper_providers_max helper providers,
15688	 * we refuse to add a new one.
15689	 */
15690	if (help->dthps_nprovs >= dtrace_helper_providers_max)
15691		return (ENOSPC);
15692
15693	/*
15694	 * Check to make sure this isn't a duplicate.
15695	 */
15696	for (i = 0; i < help->dthps_nprovs; i++) {
15697		if (dofhp->dofhp_dof ==
15698		    help->dthps_provs[i]->dthp_prov.dofhp_dof)
15699			return (EALREADY);
15700	}
15701
15702	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15703	hprov->dthp_prov = *dofhp;
15704	hprov->dthp_ref = 1;
15705	hprov->dthp_generation = gen;
15706
15707	/*
15708	 * Allocate a bigger table for helper providers if it's already full.
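	 *
	 * (The table starts at two slots and doubles on each growth -- 2,
	 * 4, 8, ... -- capped at dtrace_helper_providers_max, so n
	 * additions incur only O(log n) reallocations; the old table is
	 * copied forward and then freed below.)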
15709 */ 15710 if (help->dthps_maxprovs == help->dthps_nprovs) { 15711 tmp_maxprovs = help->dthps_maxprovs; 15712 tmp_provs = help->dthps_provs; 15713 15714 if (help->dthps_maxprovs == 0) 15715 help->dthps_maxprovs = 2; 15716 else 15717 help->dthps_maxprovs *= 2; 15718 if (help->dthps_maxprovs > dtrace_helper_providers_max) 15719 help->dthps_maxprovs = dtrace_helper_providers_max; 15720 15721 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 15722 15723 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 15724 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15725 15726 if (tmp_provs != NULL) { 15727 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 15728 sizeof (dtrace_helper_provider_t *)); 15729 kmem_free(tmp_provs, tmp_maxprovs * 15730 sizeof (dtrace_helper_provider_t *)); 15731 } 15732 } 15733 15734 help->dthps_provs[help->dthps_nprovs] = hprov; 15735 help->dthps_nprovs++; 15736 15737 return (0); 15738 } 15739 15740 static void 15741 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 15742 { 15743 mutex_enter(&dtrace_lock); 15744 15745 if (--hprov->dthp_ref == 0) { 15746 dof_hdr_t *dof; 15747 mutex_exit(&dtrace_lock); 15748 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 15749 dtrace_dof_destroy(dof); 15750 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 15751 } else { 15752 mutex_exit(&dtrace_lock); 15753 } 15754 } 15755 15756 static int 15757 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 15758 { 15759 uintptr_t daddr = (uintptr_t)dof; 15760 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 15761 dof_provider_t *provider; 15762 dof_probe_t *probe; 15763 uint8_t *arg; 15764 char *strtab, *typestr; 15765 dof_stridx_t typeidx; 15766 size_t typesz; 15767 uint_t nprobes, j, k; 15768 15769 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 15770 15771 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 15772 dtrace_dof_error(dof, "misaligned section offset"); 15773 return (-1); 15774 } 15775 15776 /* 15777 * The section needs to be large enough to contain the DOF provider 15778 * structure appropriate for the given version. 15779 */ 15780 if (sec->dofs_size < 15781 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
15782 offsetof(dof_provider_t, dofpv_prenoffs) : 15783 sizeof (dof_provider_t))) { 15784 dtrace_dof_error(dof, "provider section too small"); 15785 return (-1); 15786 } 15787 15788 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 15789 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 15790 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 15791 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 15792 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 15793 15794 if (str_sec == NULL || prb_sec == NULL || 15795 arg_sec == NULL || off_sec == NULL) 15796 return (-1); 15797 15798 enoff_sec = NULL; 15799 15800 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 15801 provider->dofpv_prenoffs != DOF_SECT_NONE && 15802 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 15803 provider->dofpv_prenoffs)) == NULL) 15804 return (-1); 15805 15806 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 15807 15808 if (provider->dofpv_name >= str_sec->dofs_size || 15809 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 15810 dtrace_dof_error(dof, "invalid provider name"); 15811 return (-1); 15812 } 15813 15814 if (prb_sec->dofs_entsize == 0 || 15815 prb_sec->dofs_entsize > prb_sec->dofs_size) { 15816 dtrace_dof_error(dof, "invalid entry size"); 15817 return (-1); 15818 } 15819 15820 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 15821 dtrace_dof_error(dof, "misaligned entry size"); 15822 return (-1); 15823 } 15824 15825 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 15826 dtrace_dof_error(dof, "invalid entry size"); 15827 return (-1); 15828 } 15829 15830 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 15831 dtrace_dof_error(dof, "misaligned section offset"); 15832 return (-1); 15833 } 15834 15835 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 15836 dtrace_dof_error(dof, "invalid entry size"); 15837 return (-1); 15838 } 15839 15840 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 15841 15842 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 15843 15844 /* 15845 * Take a pass through the probes to check for errors. 15846 */ 15847 for (j = 0; j < nprobes; j++) { 15848 probe = (dof_probe_t *)(uintptr_t)(daddr + 15849 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 15850 15851 if (probe->dofpr_func >= str_sec->dofs_size) { 15852 dtrace_dof_error(dof, "invalid function name"); 15853 return (-1); 15854 } 15855 15856 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 15857 dtrace_dof_error(dof, "function name too long"); 15858 return (-1); 15859 } 15860 15861 if (probe->dofpr_name >= str_sec->dofs_size || 15862 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 15863 dtrace_dof_error(dof, "invalid probe name"); 15864 return (-1); 15865 } 15866 15867 /* 15868 * The offset count must not wrap the index, and the offsets 15869 * must also not overflow the section's data. 15870 */ 15871 if (probe->dofpr_offidx + probe->dofpr_noffs < 15872 probe->dofpr_offidx || 15873 (probe->dofpr_offidx + probe->dofpr_noffs) * 15874 off_sec->dofs_entsize > off_sec->dofs_size) { 15875 dtrace_dof_error(dof, "invalid probe offset"); 15876 return (-1); 15877 } 15878 15879 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 15880 /* 15881 * If there's no is-enabled offset section, make sure 15882 * there aren't any is-enabled offsets. Otherwise 15883 * perform the same checks as for probe offsets 15884 * (immediately above). 
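			 *
			 * (The "idx + n < idx" form of these checks is the
			 * usual unsigned overflow test: if the addition
			 * wraps, the sum compares less than the first
			 * addend, so wrapped values cannot sneak past the
			 * size comparison that follows.)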
15885 */ 15886 if (enoff_sec == NULL) { 15887 if (probe->dofpr_enoffidx != 0 || 15888 probe->dofpr_nenoffs != 0) { 15889 dtrace_dof_error(dof, "is-enabled " 15890 "offsets with null section"); 15891 return (-1); 15892 } 15893 } else if (probe->dofpr_enoffidx + 15894 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 15895 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 15896 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 15897 dtrace_dof_error(dof, "invalid is-enabled " 15898 "offset"); 15899 return (-1); 15900 } 15901 15902 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 15903 dtrace_dof_error(dof, "zero probe and " 15904 "is-enabled offsets"); 15905 return (-1); 15906 } 15907 } else if (probe->dofpr_noffs == 0) { 15908 dtrace_dof_error(dof, "zero probe offsets"); 15909 return (-1); 15910 } 15911 15912 if (probe->dofpr_argidx + probe->dofpr_xargc < 15913 probe->dofpr_argidx || 15914 (probe->dofpr_argidx + probe->dofpr_xargc) * 15915 arg_sec->dofs_entsize > arg_sec->dofs_size) { 15916 dtrace_dof_error(dof, "invalid args"); 15917 return (-1); 15918 } 15919 15920 typeidx = probe->dofpr_nargv; 15921 typestr = strtab + probe->dofpr_nargv; 15922 for (k = 0; k < probe->dofpr_nargc; k++) { 15923 if (typeidx >= str_sec->dofs_size) { 15924 dtrace_dof_error(dof, "bad " 15925 "native argument type"); 15926 return (-1); 15927 } 15928 15929 typesz = strlen(typestr) + 1; 15930 if (typesz > DTRACE_ARGTYPELEN) { 15931 dtrace_dof_error(dof, "native " 15932 "argument type too long"); 15933 return (-1); 15934 } 15935 typeidx += typesz; 15936 typestr += typesz; 15937 } 15938 15939 typeidx = probe->dofpr_xargv; 15940 typestr = strtab + probe->dofpr_xargv; 15941 for (k = 0; k < probe->dofpr_xargc; k++) { 15942 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 15943 dtrace_dof_error(dof, "bad " 15944 "native argument index"); 15945 return (-1); 15946 } 15947 15948 if (typeidx >= str_sec->dofs_size) { 15949 dtrace_dof_error(dof, "bad " 15950 "translated argument type"); 15951 return (-1); 15952 } 15953 15954 typesz = strlen(typestr) + 1; 15955 if (typesz > DTRACE_ARGTYPELEN) { 15956 dtrace_dof_error(dof, "translated argument " 15957 "type too long"); 15958 return (-1); 15959 } 15960 15961 typeidx += typesz; 15962 typestr += typesz; 15963 } 15964 } 15965 15966 return (0); 15967 } 15968 15969 static int 15970 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 15971 { 15972 dtrace_helpers_t *help; 15973 dtrace_vstate_t *vstate; 15974 dtrace_enabling_t *enab = NULL; 15975 proc_t *p = curproc; 15976 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 15977 uintptr_t daddr = (uintptr_t)dof; 15978 15979 ASSERT(MUTEX_HELD(&dtrace_lock)); 15980 15981 #ifdef __FreeBSD__ 15982 if (dhp->dofhp_pid != p->p_pid) { 15983 if ((p = pfind(dhp->dofhp_pid)) == NULL) 15984 return (-1); 15985 if (!P_SHOULDSTOP(p) || 15986 (p->p_flag & P_TRACED) == 0 || 15987 p->p_pptr->p_pid != curproc->p_pid) { 15988 PROC_UNLOCK(p); 15989 return (-1); 15990 } 15991 PROC_UNLOCK(p); 15992 } 15993 #endif 15994 15995 if ((help = p->p_dtrace_helpers) == NULL) 15996 help = dtrace_helpers_create(p); 15997 15998 vstate = &help->dthps_vstate; 15999 16000 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 16001 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 16002 dtrace_dof_destroy(dof); 16003 return (rv); 16004 } 16005 16006 /* 16007 * Look for helper providers and validate their descriptions. 
16008	 */
16009	if (dhp != NULL) {
16010		for (i = 0; i < dof->dofh_secnum; i++) {
16011			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
16012			    dof->dofh_secoff + i * dof->dofh_secsize);
16013
16014			if (sec->dofs_type != DOF_SECT_PROVIDER)
16015				continue;
16016
16017			if (dtrace_helper_provider_validate(dof, sec) != 0) {
16018				dtrace_enabling_destroy(enab);
16019				dtrace_dof_destroy(dof);
16020				return (-1);
16021			}
16022
16023			nprovs++;
16024		}
16025	}
16026
16027	/*
16028	 * Now we need to walk through the ECB descriptions in the enabling.
16029	 */
16030	for (i = 0; i < enab->dten_ndesc; i++) {
16031		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
16032		dtrace_probedesc_t *desc = &ep->dted_probe;
16033
16034		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
16035			continue;
16036
16037		if (strcmp(desc->dtpd_mod, "helper") != 0)
16038			continue;
16039
16040		if (strcmp(desc->dtpd_func, "ustack") != 0)
16041			continue;
16042
16043		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
16044		    ep, help)) != 0) {
16045			/*
16046			 * Adding this helper action failed -- we are now going
16047			 * to rip out the entire generation and return failure.
16048			 */
16049			(void) dtrace_helper_destroygen(help,
16050			    help->dthps_generation);
16051			dtrace_enabling_destroy(enab);
16052			dtrace_dof_destroy(dof);
16053			return (-1);
16054		}
16055
16056		nhelpers++;
16057	}
16058
16059	if (nhelpers < enab->dten_ndesc)
16060		dtrace_dof_error(dof, "unmatched helpers");
16061
16062	gen = help->dthps_generation++;
16063	dtrace_enabling_destroy(enab);
16064
16065	if (dhp != NULL && nprovs > 0) {
16066		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
16067		if (dtrace_helper_provider_add(dhp, help, gen) == 0) {
16068			mutex_exit(&dtrace_lock);
16069			dtrace_helper_provider_register(p, help, dhp);
16070			mutex_enter(&dtrace_lock);
16071
16072			destroy = 0;
16073		}
16074	}
16075
16076	if (destroy)
16077		dtrace_dof_destroy(dof);
16078
16079	return (gen);
16080 }
16081
16082 static dtrace_helpers_t *
16083 dtrace_helpers_create(proc_t *p)
16084 {
16085	dtrace_helpers_t *help;
16086
16087	ASSERT(MUTEX_HELD(&dtrace_lock));
16088	ASSERT(p->p_dtrace_helpers == NULL);
16089
16090	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16091	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16092	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16093
16094	p->p_dtrace_helpers = help;
16095	dtrace_helpers++;
16096
16097	return (help);
16098 }
16099
16100 #ifdef illumos
16101 static
16102 #endif
16103 void
16104 dtrace_helpers_destroy(proc_t *p)
16105 {
16106	dtrace_helpers_t *help;
16107	dtrace_vstate_t *vstate;
16108 #ifdef illumos
16109	proc_t *p = curproc;
16110 #endif
16111	int i;
16112
16113	mutex_enter(&dtrace_lock);
16114
16115	ASSERT(p->p_dtrace_helpers != NULL);
16116	ASSERT(dtrace_helpers > 0);
16117
16118	help = p->p_dtrace_helpers;
16119	vstate = &help->dthps_vstate;
16120
16121	/*
16122	 * We're now going to lose the help from this process.
16123	 */
16124	p->p_dtrace_helpers = NULL;
16125	dtrace_sync();
16126
16127	/*
16128	 * Destroy the helper actions.
16129	 */
16130	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16131		dtrace_helper_action_t *h, *next;
16132
16133		for (h = help->dthps_actions[i]; h != NULL; h = next) {
16134			next = h->dtha_next;
16135			dtrace_helper_action_destroy(h, vstate);
16136			h = next;
16137		}
16138	}
16139
16140	mutex_exit(&dtrace_lock);
16141
16142	/*
16143	 * Destroy the helper providers.
16144 */ 16145 if (help->dthps_maxprovs > 0) { 16146 mutex_enter(&dtrace_meta_lock); 16147 if (dtrace_meta_pid != NULL) { 16148 ASSERT(dtrace_deferred_pid == NULL); 16149 16150 for (i = 0; i < help->dthps_nprovs; i++) { 16151 dtrace_helper_provider_remove( 16152 &help->dthps_provs[i]->dthp_prov, p->p_pid); 16153 } 16154 } else { 16155 mutex_enter(&dtrace_lock); 16156 ASSERT(help->dthps_deferred == 0 || 16157 help->dthps_next != NULL || 16158 help->dthps_prev != NULL || 16159 help == dtrace_deferred_pid); 16160 16161 /* 16162 * Remove the helper from the deferred list. 16163 */ 16164 if (help->dthps_next != NULL) 16165 help->dthps_next->dthps_prev = help->dthps_prev; 16166 if (help->dthps_prev != NULL) 16167 help->dthps_prev->dthps_next = help->dthps_next; 16168 if (dtrace_deferred_pid == help) { 16169 dtrace_deferred_pid = help->dthps_next; 16170 ASSERT(help->dthps_prev == NULL); 16171 } 16172 16173 mutex_exit(&dtrace_lock); 16174 } 16175 16176 mutex_exit(&dtrace_meta_lock); 16177 16178 for (i = 0; i < help->dthps_nprovs; i++) { 16179 dtrace_helper_provider_destroy(help->dthps_provs[i]); 16180 } 16181 16182 kmem_free(help->dthps_provs, help->dthps_maxprovs * 16183 sizeof (dtrace_helper_provider_t *)); 16184 } 16185 16186 mutex_enter(&dtrace_lock); 16187 16188 dtrace_vstate_fini(&help->dthps_vstate); 16189 kmem_free(help->dthps_actions, 16190 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 16191 kmem_free(help, sizeof (dtrace_helpers_t)); 16192 16193 --dtrace_helpers; 16194 mutex_exit(&dtrace_lock); 16195 } 16196 16197 #ifdef illumos 16198 static 16199 #endif 16200 void 16201 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 16202 { 16203 dtrace_helpers_t *help, *newhelp; 16204 dtrace_helper_action_t *helper, *new, *last; 16205 dtrace_difo_t *dp; 16206 dtrace_vstate_t *vstate; 16207 int i, j, sz, hasprovs = 0; 16208 16209 mutex_enter(&dtrace_lock); 16210 ASSERT(from->p_dtrace_helpers != NULL); 16211 ASSERT(dtrace_helpers > 0); 16212 16213 help = from->p_dtrace_helpers; 16214 newhelp = dtrace_helpers_create(to); 16215 ASSERT(to->p_dtrace_helpers != NULL); 16216 16217 newhelp->dthps_generation = help->dthps_generation; 16218 vstate = &newhelp->dthps_vstate; 16219 16220 /* 16221 * Duplicate the helper actions. 16222 */ 16223 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 16224 if ((helper = help->dthps_actions[i]) == NULL) 16225 continue; 16226 16227 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 16228 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 16229 KM_SLEEP); 16230 new->dtha_generation = helper->dtha_generation; 16231 16232 if ((dp = helper->dtha_predicate) != NULL) { 16233 dp = dtrace_difo_duplicate(dp, vstate); 16234 new->dtha_predicate = dp; 16235 } 16236 16237 new->dtha_nactions = helper->dtha_nactions; 16238 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 16239 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 16240 16241 for (j = 0; j < new->dtha_nactions; j++) { 16242 dtrace_difo_t *dp = helper->dtha_actions[j]; 16243 16244 ASSERT(dp != NULL); 16245 dp = dtrace_difo_duplicate(dp, vstate); 16246 new->dtha_actions[j] = dp; 16247 } 16248 16249 if (last != NULL) { 16250 last->dtha_next = new; 16251 } else { 16252 newhelp->dthps_actions[i] = new; 16253 } 16254 16255 last = new; 16256 } 16257 } 16258 16259 /* 16260 * Duplicate the helper providers and register them with the 16261 * DTrace framework. 
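 *
 * (Note the design here: the child does not deep-copy the provider
 * descriptions; it takes references on the parent's structures and
 * bumps dthp_ref.  This routine itself is reached through the
 * dtrace_helpers_fork hook installed at attach/load time; a sketch of
 * the fork-side caller, with parent `p` and child `cp`:
 *
 *	if (p->p_dtrace_helpers != NULL && dtrace_helpers_fork != NULL)
 *		(*dtrace_helpers_fork)(p, cp);
 * )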
16262  */
16263 	if (help->dthps_nprovs > 0) {
16264 		newhelp->dthps_nprovs = help->dthps_nprovs;
16265 		newhelp->dthps_maxprovs = help->dthps_nprovs;
16266 		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16267 		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16268 		for (i = 0; i < newhelp->dthps_nprovs; i++) {
16269 			newhelp->dthps_provs[i] = help->dthps_provs[i];
16270 			newhelp->dthps_provs[i]->dthp_ref++;
16271 		}
16272 
16273 		hasprovs = 1;
16274 	}
16275 
16276 	mutex_exit(&dtrace_lock);
16277 
16278 	if (hasprovs)
16279 		dtrace_helper_provider_register(to, newhelp, NULL);
16280 }
16281 
16282 /*
16283  * DTrace Hook Functions
16284  */
16285 static void
16286 dtrace_module_loaded(modctl_t *ctl)
16287 {
16288 	dtrace_provider_t *prv;
16289 
16290 	mutex_enter(&dtrace_provider_lock);
16291 #ifdef illumos
16292 	mutex_enter(&mod_lock);
16293 #endif
16294 
16295 #ifdef illumos
16296 	ASSERT(ctl->mod_busy);
16297 #endif
16298 
16299 	/*
16300 	 * We're going to call each provider's per-module provide operation
16301 	 * specifying only this module.
16302 	 */
16303 	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16304 		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16305 
16306 #ifdef illumos
16307 	mutex_exit(&mod_lock);
16308 #endif
16309 	mutex_exit(&dtrace_provider_lock);
16310 
16311 	/*
16312 	 * If we have any retained enablings, we need to match against them.
16313 	 * Enabling probes requires that cpu_lock be held, and we cannot hold
16314 	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16315 	 * module.  (In particular, this happens when loading scheduling
16316 	 * classes.)  So if we have any retained enablings, we need to dispatch
16317 	 * our task queue to do the match for us.
16318 	 */
16319 	mutex_enter(&dtrace_lock);
16320 
16321 	if (dtrace_retained == NULL) {
16322 		mutex_exit(&dtrace_lock);
16323 		return;
16324 	}
16325 
16326 	(void) taskq_dispatch(dtrace_taskq,
16327 	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
16328 
16329 	mutex_exit(&dtrace_lock);
16330 
16331 	/*
16332 	 * And now, for a little heuristic sleaze:  in general, we want to
16333 	 * match modules as soon as they load.  However, we cannot guarantee
16334 	 * this, because it would lead us to the lock ordering violation
16335 	 * outlined above.  The common case, of course, is that cpu_lock is
16336 	 * _not_ held -- so we delay here for a clock tick, hoping that that's
16337 	 * long enough for the task queue to do its work.  If it's not, it's
16338 	 * not a serious problem -- it just means that the module that we
16339 	 * just loaded may not be immediately instrumentable.
16340 	 */
16341 	delay(1);
16342 }
16343 
16344 static void
16345 #ifdef illumos
16346 dtrace_module_unloaded(modctl_t *ctl)
16347 #else
16348 dtrace_module_unloaded(modctl_t *ctl, int *error)
16349 #endif
16350 {
16351 	dtrace_probe_t template, *probe, *first, *next;
16352 	dtrace_provider_t *prov;
16353 #ifndef illumos
16354 	char modname[DTRACE_MODNAMELEN];
16355 	size_t len;
16356 #endif
16357 
16358 #ifdef illumos
16359 	template.dtpr_mod = ctl->mod_modname;
16360 #else
16361 	/* Handle the fact that ctl->filename may end in ".ko".
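	 * For example, a module loaded from "foo.ko" must be matched
	 * against probes whose module name is "foo".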
*/ 16362 strlcpy(modname, ctl->filename, sizeof(modname)); 16363 len = strlen(ctl->filename); 16364 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) 16365 modname[len - 3] = '\0'; 16366 template.dtpr_mod = modname; 16367 #endif 16368 16369 mutex_enter(&dtrace_provider_lock); 16370 #ifdef illumos 16371 mutex_enter(&mod_lock); 16372 #endif 16373 mutex_enter(&dtrace_lock); 16374 16375 #ifndef illumos 16376 if (ctl->nenabled > 0) { 16377 /* Don't allow unloads if a probe is enabled. */ 16378 mutex_exit(&dtrace_provider_lock); 16379 mutex_exit(&dtrace_lock); 16380 *error = -1; 16381 printf( 16382 "kldunload: attempt to unload module that has DTrace probes enabled\n"); 16383 return; 16384 } 16385 #endif 16386 16387 if (dtrace_bymod == NULL) { 16388 /* 16389 * The DTrace module is loaded (obviously) but not attached; 16390 * we don't have any work to do. 16391 */ 16392 mutex_exit(&dtrace_provider_lock); 16393 #ifdef illumos 16394 mutex_exit(&mod_lock); 16395 #endif 16396 mutex_exit(&dtrace_lock); 16397 return; 16398 } 16399 16400 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 16401 probe != NULL; probe = probe->dtpr_nextmod) { 16402 if (probe->dtpr_ecb != NULL) { 16403 mutex_exit(&dtrace_provider_lock); 16404 #ifdef illumos 16405 mutex_exit(&mod_lock); 16406 #endif 16407 mutex_exit(&dtrace_lock); 16408 16409 /* 16410 * This shouldn't _actually_ be possible -- we're 16411 * unloading a module that has an enabled probe in it. 16412 * (It's normally up to the provider to make sure that 16413 * this can't happen.) However, because dtps_enable() 16414 * doesn't have a failure mode, there can be an 16415 * enable/unload race. Upshot: we don't want to 16416 * assert, but we're not going to disable the 16417 * probe, either. 16418 */ 16419 if (dtrace_err_verbose) { 16420 #ifdef illumos 16421 cmn_err(CE_WARN, "unloaded module '%s' had " 16422 "enabled probes", ctl->mod_modname); 16423 #else 16424 cmn_err(CE_WARN, "unloaded module '%s' had " 16425 "enabled probes", modname); 16426 #endif 16427 } 16428 16429 return; 16430 } 16431 } 16432 16433 probe = first; 16434 16435 for (first = NULL; probe != NULL; probe = next) { 16436 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 16437 16438 dtrace_probes[probe->dtpr_id - 1] = NULL; 16439 16440 next = probe->dtpr_nextmod; 16441 dtrace_hash_remove(dtrace_bymod, probe); 16442 dtrace_hash_remove(dtrace_byfunc, probe); 16443 dtrace_hash_remove(dtrace_byname, probe); 16444 16445 if (first == NULL) { 16446 first = probe; 16447 probe->dtpr_nextmod = NULL; 16448 } else { 16449 probe->dtpr_nextmod = first; 16450 first = probe; 16451 } 16452 } 16453 16454 /* 16455 * We've removed all of the module's probes from the hash chains and 16456 * from the probe array. Now issue a dtrace_sync() to be sure that 16457 * everyone has cleared out from any probe array processing. 
16458 */ 16459 dtrace_sync(); 16460 16461 for (probe = first; probe != NULL; probe = first) { 16462 first = probe->dtpr_nextmod; 16463 prov = probe->dtpr_provider; 16464 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 16465 probe->dtpr_arg); 16466 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 16467 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 16468 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 16469 #ifdef illumos 16470 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 16471 #else 16472 free_unr(dtrace_arena, probe->dtpr_id); 16473 #endif 16474 kmem_free(probe, sizeof (dtrace_probe_t)); 16475 } 16476 16477 mutex_exit(&dtrace_lock); 16478 #ifdef illumos 16479 mutex_exit(&mod_lock); 16480 #endif 16481 mutex_exit(&dtrace_provider_lock); 16482 } 16483 16484 #ifndef illumos 16485 static void 16486 dtrace_kld_load(void *arg __unused, linker_file_t lf) 16487 { 16488 16489 dtrace_module_loaded(lf); 16490 } 16491 16492 static void 16493 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) 16494 { 16495 16496 if (*error != 0) 16497 /* We already have an error, so don't do anything. */ 16498 return; 16499 dtrace_module_unloaded(lf, error); 16500 } 16501 #endif 16502 16503 #ifdef illumos 16504 static void 16505 dtrace_suspend(void) 16506 { 16507 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 16508 } 16509 16510 static void 16511 dtrace_resume(void) 16512 { 16513 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 16514 } 16515 #endif 16516 16517 static int 16518 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 16519 { 16520 ASSERT(MUTEX_HELD(&cpu_lock)); 16521 mutex_enter(&dtrace_lock); 16522 16523 switch (what) { 16524 case CPU_CONFIG: { 16525 dtrace_state_t *state; 16526 dtrace_optval_t *opt, rs, c; 16527 16528 /* 16529 * For now, we only allocate a new buffer for anonymous state. 16530 */ 16531 if ((state = dtrace_anon.dta_state) == NULL) 16532 break; 16533 16534 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 16535 break; 16536 16537 opt = state->dts_options; 16538 c = opt[DTRACEOPT_CPU]; 16539 16540 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 16541 break; 16542 16543 /* 16544 * Regardless of what the actual policy is, we're going to 16545 * temporarily set our resize policy to be manual. We're 16546 * also going to temporarily set our CPU option to denote 16547 * the newly configured CPU. 16548 */ 16549 rs = opt[DTRACEOPT_BUFRESIZE]; 16550 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 16551 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 16552 16553 (void) dtrace_state_buffers(state); 16554 16555 opt[DTRACEOPT_BUFRESIZE] = rs; 16556 opt[DTRACEOPT_CPU] = c; 16557 16558 break; 16559 } 16560 16561 case CPU_UNCONFIG: 16562 /* 16563 * We don't free the buffer in the CPU_UNCONFIG case. (The 16564 * buffer will be freed when the consumer exits.) 
16565 */ 16566 break; 16567 16568 default: 16569 break; 16570 } 16571 16572 mutex_exit(&dtrace_lock); 16573 return (0); 16574 } 16575 16576 #ifdef illumos 16577 static void 16578 dtrace_cpu_setup_initial(processorid_t cpu) 16579 { 16580 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 16581 } 16582 #endif 16583 16584 static void 16585 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 16586 { 16587 if (dtrace_toxranges >= dtrace_toxranges_max) { 16588 int osize, nsize; 16589 dtrace_toxrange_t *range; 16590 16591 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16592 16593 if (osize == 0) { 16594 ASSERT(dtrace_toxrange == NULL); 16595 ASSERT(dtrace_toxranges_max == 0); 16596 dtrace_toxranges_max = 1; 16597 } else { 16598 dtrace_toxranges_max <<= 1; 16599 } 16600 16601 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 16602 range = kmem_zalloc(nsize, KM_SLEEP); 16603 16604 if (dtrace_toxrange != NULL) { 16605 ASSERT(osize != 0); 16606 bcopy(dtrace_toxrange, range, osize); 16607 kmem_free(dtrace_toxrange, osize); 16608 } 16609 16610 dtrace_toxrange = range; 16611 } 16612 16613 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 16614 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 16615 16616 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 16617 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 16618 dtrace_toxranges++; 16619 } 16620 16621 static void 16622 dtrace_getf_barrier() 16623 { 16624 #ifdef illumos 16625 /* 16626 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings 16627 * that contain calls to getf(), this routine will be called on every 16628 * closef() before either the underlying vnode is released or the 16629 * file_t itself is freed. By the time we are here, it is essential 16630 * that the file_t can no longer be accessed from a call to getf() 16631 * in probe context -- that assures that a dtrace_sync() can be used 16632 * to clear out any enablings referring to the old structures. 
16633 */ 16634 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || 16635 kcred->cr_zone->zone_dtrace_getf != 0) 16636 dtrace_sync(); 16637 #endif 16638 } 16639 16640 /* 16641 * DTrace Driver Cookbook Functions 16642 */ 16643 #ifdef illumos 16644 /*ARGSUSED*/ 16645 static int 16646 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 16647 { 16648 dtrace_provider_id_t id; 16649 dtrace_state_t *state = NULL; 16650 dtrace_enabling_t *enab; 16651 16652 mutex_enter(&cpu_lock); 16653 mutex_enter(&dtrace_provider_lock); 16654 mutex_enter(&dtrace_lock); 16655 16656 if (ddi_soft_state_init(&dtrace_softstate, 16657 sizeof (dtrace_state_t), 0) != 0) { 16658 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 16659 mutex_exit(&cpu_lock); 16660 mutex_exit(&dtrace_provider_lock); 16661 mutex_exit(&dtrace_lock); 16662 return (DDI_FAILURE); 16663 } 16664 16665 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 16666 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 16667 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 16668 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 16669 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 16670 ddi_remove_minor_node(devi, NULL); 16671 ddi_soft_state_fini(&dtrace_softstate); 16672 mutex_exit(&cpu_lock); 16673 mutex_exit(&dtrace_provider_lock); 16674 mutex_exit(&dtrace_lock); 16675 return (DDI_FAILURE); 16676 } 16677 16678 ddi_report_dev(devi); 16679 dtrace_devi = devi; 16680 16681 dtrace_modload = dtrace_module_loaded; 16682 dtrace_modunload = dtrace_module_unloaded; 16683 dtrace_cpu_init = dtrace_cpu_setup_initial; 16684 dtrace_helpers_cleanup = dtrace_helpers_destroy; 16685 dtrace_helpers_fork = dtrace_helpers_duplicate; 16686 dtrace_cpustart_init = dtrace_suspend; 16687 dtrace_cpustart_fini = dtrace_resume; 16688 dtrace_debugger_init = dtrace_suspend; 16689 dtrace_debugger_fini = dtrace_resume; 16690 16691 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 16692 16693 ASSERT(MUTEX_HELD(&cpu_lock)); 16694 16695 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 16696 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 16697 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 16698 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 16699 VM_SLEEP | VMC_IDENTIFIER); 16700 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 16701 1, INT_MAX, 0); 16702 16703 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 16704 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 16705 NULL, NULL, NULL, NULL, NULL, 0); 16706 16707 ASSERT(MUTEX_HELD(&cpu_lock)); 16708 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 16709 offsetof(dtrace_probe_t, dtpr_nextmod), 16710 offsetof(dtrace_probe_t, dtpr_prevmod)); 16711 16712 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 16713 offsetof(dtrace_probe_t, dtpr_nextfunc), 16714 offsetof(dtrace_probe_t, dtpr_prevfunc)); 16715 16716 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 16717 offsetof(dtrace_probe_t, dtpr_nextname), 16718 offsetof(dtrace_probe_t, dtpr_prevname)); 16719 16720 if (dtrace_retain_max < 1) { 16721 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 16722 "setting to 1", dtrace_retain_max); 16723 dtrace_retain_max = 1; 16724 } 16725 16726 /* 16727 * Now discover our toxic ranges. 
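 *
 * (The platform walks whatever address ranges must never be
 * dereferenced from probe context and reports each one through the
 * callback passed below; a sketch with purely hypothetical bounds
 * would be:
 *
 *	dtrace_toxrange_add((uintptr_t)hole_start, (uintptr_t)hole_end);
 * )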
16728  */
16729 	dtrace_toxic_ranges(dtrace_toxrange_add);
16730 
16731 	/*
16732 	 * Before we register ourselves as a provider to our own framework,
16733 	 * we would like to assert that dtrace_provider is NULL -- but that's
16734 	 * not true if we were loaded as a dependency of a DTrace provider.
16735 	 * Once we've registered, we can assert that dtrace_provider is our
16736 	 * pseudo provider.
16737 	 */
16738 	(void) dtrace_register("dtrace", &dtrace_provider_attr,
16739 	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
16740 
16741 	ASSERT(dtrace_provider != NULL);
16742 	ASSERT((dtrace_provider_id_t)dtrace_provider == id);
16743 
16744 	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16745 	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16746 	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16747 	    dtrace_provider, NULL, NULL, "END", 0, NULL);
16748 	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16749 	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16750 
16751 	dtrace_anon_property();
16752 	mutex_exit(&cpu_lock);
16753 
16754 	/*
16755 	 * If there are already providers, we must ask them to provide their
16756 	 * probes, and then match any anonymous enabling against them.  Note
16757 	 * that there should be no other retained enablings at this time:
16758 	 * the only retained enablings at this time should be the anonymous
16759 	 * enabling.
16760 	 */
16761 	if (dtrace_anon.dta_enabling != NULL) {
16762 		ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16763 
16764 		dtrace_enabling_provide(NULL);
16765 		state = dtrace_anon.dta_state;
16766 
16767 		/*
16768 		 * We couldn't hold cpu_lock across the above call to
16769 		 * dtrace_enabling_provide(), but we must hold it to actually
16770 		 * enable the probes.  We have to drop all of our locks, pick
16771 		 * up cpu_lock, and regain our locks before matching the
16772 		 * retained anonymous enabling.
16773 		 */
16774 		mutex_exit(&dtrace_lock);
16775 		mutex_exit(&dtrace_provider_lock);
16776 
16777 		mutex_enter(&cpu_lock);
16778 		mutex_enter(&dtrace_provider_lock);
16779 		mutex_enter(&dtrace_lock);
16780 
16781 		if ((enab = dtrace_anon.dta_enabling) != NULL)
16782 			(void) dtrace_enabling_match(enab, NULL);
16783 
16784 		mutex_exit(&cpu_lock);
16785 	}
16786 
16787 	mutex_exit(&dtrace_lock);
16788 	mutex_exit(&dtrace_provider_lock);
16789 
16790 	if (state != NULL) {
16791 		/*
16792 		 * If we created any anonymous state, set it going now.
16793 		 */
16794 		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16795 	}
16796 
16797 	return (DDI_SUCCESS);
16798 }
16799 #endif	/* illumos */
16800 
16801 #ifndef illumos
16802 static void dtrace_dtr(void *);
16803 #endif
16804 
16805 /*ARGSUSED*/
16806 static int
16807 #ifdef illumos
16808 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
16809 #else
16810 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
16811 #endif
16812 {
16813 	dtrace_state_t *state;
16814 	uint32_t priv;
16815 	uid_t uid;
16816 	zoneid_t zoneid;
16817 
16818 #ifdef illumos
16819 	if (getminor(*devp) == DTRACEMNRN_HELPER)
16820 		return (0);
16821 
16822 	/*
16823 	 * If this wasn't an open with the "helper" minor, then it must be
16824 	 * the "dtrace" minor.
16825 	 */
16826 	if (getminor(*devp) != DTRACEMNRN_DTRACE)
16827 		return (ENXIO);
16828 #else
16829 	cred_t *cred_p = NULL;
16830 	cred_p = dev->si_cred;
16831 
16832 	/*
16833 	 * If no DTRACE_PRIV_* bits are set in the credential, then the
16834 	 * caller lacks sufficient permission to do anything with DTrace.
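 * (dtrace_cred2priv() reduces the credential to a DTRACE_PRIV_* mask;
 * a fully privileged credential effectively yields DTRACE_PRIV_ALL,
 * while an ordinary one yields DTRACE_PRIV_NONE and the open fails
 * with EACCES below.)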
16835 */ 16836 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 16837 if (priv == DTRACE_PRIV_NONE) { 16838 #endif 16839 16840 return (EACCES); 16841 } 16842 16843 /* 16844 * Ask all providers to provide all their probes. 16845 */ 16846 mutex_enter(&dtrace_provider_lock); 16847 dtrace_probe_provide(NULL, NULL); 16848 mutex_exit(&dtrace_provider_lock); 16849 16850 mutex_enter(&cpu_lock); 16851 mutex_enter(&dtrace_lock); 16852 dtrace_opens++; 16853 dtrace_membar_producer(); 16854 16855 #ifdef illumos 16856 /* 16857 * If the kernel debugger is active (that is, if the kernel debugger 16858 * modified text in some way), we won't allow the open. 16859 */ 16860 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 16861 dtrace_opens--; 16862 mutex_exit(&cpu_lock); 16863 mutex_exit(&dtrace_lock); 16864 return (EBUSY); 16865 } 16866 16867 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { 16868 /* 16869 * If DTrace helper tracing is enabled, we need to allocate the 16870 * trace buffer and initialize the values. 16871 */ 16872 dtrace_helptrace_buffer = 16873 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 16874 dtrace_helptrace_next = 0; 16875 dtrace_helptrace_wrapped = 0; 16876 dtrace_helptrace_enable = 0; 16877 } 16878 16879 state = dtrace_state_create(devp, cred_p); 16880 #else 16881 state = dtrace_state_create(dev); 16882 devfs_set_cdevpriv(state, dtrace_dtr); 16883 #endif 16884 16885 mutex_exit(&cpu_lock); 16886 16887 if (state == NULL) { 16888 #ifdef illumos 16889 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 16890 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16891 #else 16892 --dtrace_opens; 16893 #endif 16894 mutex_exit(&dtrace_lock); 16895 return (EAGAIN); 16896 } 16897 16898 mutex_exit(&dtrace_lock); 16899 16900 return (0); 16901 } 16902 16903 /*ARGSUSED*/ 16904 #ifdef illumos 16905 static int 16906 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 16907 #else 16908 static void 16909 dtrace_dtr(void *data) 16910 #endif 16911 { 16912 #ifdef illumos 16913 minor_t minor = getminor(dev); 16914 dtrace_state_t *state; 16915 #endif 16916 dtrace_helptrace_t *buf = NULL; 16917 16918 #ifdef illumos 16919 if (minor == DTRACEMNRN_HELPER) 16920 return (0); 16921 16922 state = ddi_get_soft_state(dtrace_softstate, minor); 16923 #else 16924 dtrace_state_t *state = data; 16925 #endif 16926 16927 mutex_enter(&cpu_lock); 16928 mutex_enter(&dtrace_lock); 16929 16930 #ifdef illumos 16931 if (state->dts_anon) 16932 #else 16933 if (state != NULL && state->dts_anon) 16934 #endif 16935 { 16936 /* 16937 * There is anonymous state. Destroy that first. 16938 */ 16939 ASSERT(dtrace_anon.dta_state == NULL); 16940 dtrace_state_destroy(state->dts_anon); 16941 } 16942 16943 if (dtrace_helptrace_disable) { 16944 /* 16945 * If we have been told to disable helper tracing, set the 16946 * buffer to NULL before calling into dtrace_state_destroy(); 16947 * we take advantage of its dtrace_sync() to know that no 16948 * CPU is in probe context with enabled helper tracing 16949 * after it returns. 16950 */ 16951 buf = dtrace_helptrace_buffer; 16952 dtrace_helptrace_buffer = NULL; 16953 } 16954 16955 #ifdef illumos 16956 dtrace_state_destroy(state); 16957 #else 16958 if (state != NULL) { 16959 dtrace_state_destroy(state); 16960 kmem_free(state, 0); 16961 } 16962 #endif 16963 ASSERT(dtrace_opens > 0); 16964 16965 #ifdef illumos 16966 /* 16967 * Only relinquish control of the kernel debugger interface when there 16968 * are no consumers and no anonymous enablings. 
16969 */ 16970 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 16971 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 16972 #else 16973 --dtrace_opens; 16974 #endif 16975 16976 if (buf != NULL) { 16977 kmem_free(buf, dtrace_helptrace_bufsize); 16978 dtrace_helptrace_disable = 0; 16979 } 16980 16981 mutex_exit(&dtrace_lock); 16982 mutex_exit(&cpu_lock); 16983 16984 #ifdef illumos 16985 return (0); 16986 #endif 16987 } 16988 16989 #ifdef illumos 16990 /*ARGSUSED*/ 16991 static int 16992 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 16993 { 16994 int rval; 16995 dof_helper_t help, *dhp = NULL; 16996 16997 switch (cmd) { 16998 case DTRACEHIOC_ADDDOF: 16999 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 17000 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 17001 return (EFAULT); 17002 } 17003 17004 dhp = &help; 17005 arg = (intptr_t)help.dofhp_dof; 17006 /*FALLTHROUGH*/ 17007 17008 case DTRACEHIOC_ADD: { 17009 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 17010 17011 if (dof == NULL) 17012 return (rval); 17013 17014 mutex_enter(&dtrace_lock); 17015 17016 /* 17017 * dtrace_helper_slurp() takes responsibility for the dof -- 17018 * it may free it now or it may save it and free it later. 17019 */ 17020 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 17021 *rv = rval; 17022 rval = 0; 17023 } else { 17024 rval = EINVAL; 17025 } 17026 17027 mutex_exit(&dtrace_lock); 17028 return (rval); 17029 } 17030 17031 case DTRACEHIOC_REMOVE: { 17032 mutex_enter(&dtrace_lock); 17033 rval = dtrace_helper_destroygen(NULL, arg); 17034 mutex_exit(&dtrace_lock); 17035 17036 return (rval); 17037 } 17038 17039 default: 17040 break; 17041 } 17042 17043 return (ENOTTY); 17044 } 17045 17046 /*ARGSUSED*/ 17047 static int 17048 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 17049 { 17050 minor_t minor = getminor(dev); 17051 dtrace_state_t *state; 17052 int rval; 17053 17054 if (minor == DTRACEMNRN_HELPER) 17055 return (dtrace_ioctl_helper(cmd, arg, rv)); 17056 17057 state = ddi_get_soft_state(dtrace_softstate, minor); 17058 17059 if (state->dts_anon) { 17060 ASSERT(dtrace_anon.dta_state == NULL); 17061 state = state->dts_anon; 17062 } 17063 17064 switch (cmd) { 17065 case DTRACEIOC_PROVIDER: { 17066 dtrace_providerdesc_t pvd; 17067 dtrace_provider_t *pvp; 17068 17069 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 17070 return (EFAULT); 17071 17072 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 17073 mutex_enter(&dtrace_provider_lock); 17074 17075 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 17076 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 17077 break; 17078 } 17079 17080 mutex_exit(&dtrace_provider_lock); 17081 17082 if (pvp == NULL) 17083 return (ESRCH); 17084 17085 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 17086 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 17087 17088 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 17089 return (EFAULT); 17090 17091 return (0); 17092 } 17093 17094 case DTRACEIOC_EPROBE: { 17095 dtrace_eprobedesc_t epdesc; 17096 dtrace_ecb_t *ecb; 17097 dtrace_action_t *act; 17098 void *buf; 17099 size_t size; 17100 uintptr_t dest; 17101 int nrecs; 17102 17103 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 17104 return (EFAULT); 17105 17106 mutex_enter(&dtrace_lock); 17107 17108 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 17109 mutex_exit(&dtrace_lock); 17110 return (EINVAL); 17111 } 17112 17113 if (ecb->dte_probe == NULL) { 17114 
mutex_exit(&dtrace_lock); 17115 return (EINVAL); 17116 } 17117 17118 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 17119 epdesc.dtepd_uarg = ecb->dte_uarg; 17120 epdesc.dtepd_size = ecb->dte_size; 17121 17122 nrecs = epdesc.dtepd_nrecs; 17123 epdesc.dtepd_nrecs = 0; 17124 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17125 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17126 continue; 17127 17128 epdesc.dtepd_nrecs++; 17129 } 17130 17131 /* 17132 * Now that we have the size, we need to allocate a temporary 17133 * buffer in which to store the complete description. We need 17134 * the temporary buffer to be able to drop dtrace_lock() 17135 * across the copyout(), below. 17136 */ 17137 size = sizeof (dtrace_eprobedesc_t) + 17138 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 17139 17140 buf = kmem_alloc(size, KM_SLEEP); 17141 dest = (uintptr_t)buf; 17142 17143 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 17144 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 17145 17146 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 17147 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 17148 continue; 17149 17150 if (nrecs-- == 0) 17151 break; 17152 17153 bcopy(&act->dta_rec, (void *)dest, 17154 sizeof (dtrace_recdesc_t)); 17155 dest += sizeof (dtrace_recdesc_t); 17156 } 17157 17158 mutex_exit(&dtrace_lock); 17159 17160 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17161 kmem_free(buf, size); 17162 return (EFAULT); 17163 } 17164 17165 kmem_free(buf, size); 17166 return (0); 17167 } 17168 17169 case DTRACEIOC_AGGDESC: { 17170 dtrace_aggdesc_t aggdesc; 17171 dtrace_action_t *act; 17172 dtrace_aggregation_t *agg; 17173 int nrecs; 17174 uint32_t offs; 17175 dtrace_recdesc_t *lrec; 17176 void *buf; 17177 size_t size; 17178 uintptr_t dest; 17179 17180 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 17181 return (EFAULT); 17182 17183 mutex_enter(&dtrace_lock); 17184 17185 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 17186 mutex_exit(&dtrace_lock); 17187 return (EINVAL); 17188 } 17189 17190 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 17191 17192 nrecs = aggdesc.dtagd_nrecs; 17193 aggdesc.dtagd_nrecs = 0; 17194 17195 offs = agg->dtag_base; 17196 lrec = &agg->dtag_action.dta_rec; 17197 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 17198 17199 for (act = agg->dtag_first; ; act = act->dta_next) { 17200 ASSERT(act->dta_intuple || 17201 DTRACEACT_ISAGG(act->dta_kind)); 17202 17203 /* 17204 * If this action has a record size of zero, it 17205 * denotes an argument to the aggregating action. 17206 * Because the presence of this record doesn't (or 17207 * shouldn't) affect the way the data is interpreted, 17208 * we don't copy it out to save user-level the 17209 * confusion of dealing with a zero-length record. 17210 */ 17211 if (act->dta_rec.dtrd_size == 0) { 17212 ASSERT(agg->dtag_hasarg); 17213 continue; 17214 } 17215 17216 aggdesc.dtagd_nrecs++; 17217 17218 if (act == &agg->dtag_action) 17219 break; 17220 } 17221 17222 /* 17223 * Now that we have the size, we need to allocate a temporary 17224 * buffer in which to store the complete description. We need 17225 * the temporary buffer to be able to drop dtrace_lock() 17226 * across the copyout(), below. 
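 * (The copyout() can fault on the user address and block; that must
 * not happen while dtrace_lock is held.)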
17227 */ 17228 size = sizeof (dtrace_aggdesc_t) + 17229 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 17230 17231 buf = kmem_alloc(size, KM_SLEEP); 17232 dest = (uintptr_t)buf; 17233 17234 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 17235 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 17236 17237 for (act = agg->dtag_first; ; act = act->dta_next) { 17238 dtrace_recdesc_t rec = act->dta_rec; 17239 17240 /* 17241 * See the comment in the above loop for why we pass 17242 * over zero-length records. 17243 */ 17244 if (rec.dtrd_size == 0) { 17245 ASSERT(agg->dtag_hasarg); 17246 continue; 17247 } 17248 17249 if (nrecs-- == 0) 17250 break; 17251 17252 rec.dtrd_offset -= offs; 17253 bcopy(&rec, (void *)dest, sizeof (rec)); 17254 dest += sizeof (dtrace_recdesc_t); 17255 17256 if (act == &agg->dtag_action) 17257 break; 17258 } 17259 17260 mutex_exit(&dtrace_lock); 17261 17262 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 17263 kmem_free(buf, size); 17264 return (EFAULT); 17265 } 17266 17267 kmem_free(buf, size); 17268 return (0); 17269 } 17270 17271 case DTRACEIOC_ENABLE: { 17272 dof_hdr_t *dof; 17273 dtrace_enabling_t *enab = NULL; 17274 dtrace_vstate_t *vstate; 17275 int err = 0; 17276 17277 *rv = 0; 17278 17279 /* 17280 * If a NULL argument has been passed, we take this as our 17281 * cue to reevaluate our enablings. 17282 */ 17283 if (arg == NULL) { 17284 dtrace_enabling_matchall(); 17285 17286 return (0); 17287 } 17288 17289 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 17290 return (rval); 17291 17292 mutex_enter(&cpu_lock); 17293 mutex_enter(&dtrace_lock); 17294 vstate = &state->dts_vstate; 17295 17296 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 17297 mutex_exit(&dtrace_lock); 17298 mutex_exit(&cpu_lock); 17299 dtrace_dof_destroy(dof); 17300 return (EBUSY); 17301 } 17302 17303 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 17304 mutex_exit(&dtrace_lock); 17305 mutex_exit(&cpu_lock); 17306 dtrace_dof_destroy(dof); 17307 return (EINVAL); 17308 } 17309 17310 if ((rval = dtrace_dof_options(dof, state)) != 0) { 17311 dtrace_enabling_destroy(enab); 17312 mutex_exit(&dtrace_lock); 17313 mutex_exit(&cpu_lock); 17314 dtrace_dof_destroy(dof); 17315 return (rval); 17316 } 17317 17318 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 17319 err = dtrace_enabling_retain(enab); 17320 } else { 17321 dtrace_enabling_destroy(enab); 17322 } 17323 17324 mutex_exit(&cpu_lock); 17325 mutex_exit(&dtrace_lock); 17326 dtrace_dof_destroy(dof); 17327 17328 return (err); 17329 } 17330 17331 case DTRACEIOC_REPLICATE: { 17332 dtrace_repldesc_t desc; 17333 dtrace_probedesc_t *match = &desc.dtrpd_match; 17334 dtrace_probedesc_t *create = &desc.dtrpd_create; 17335 int err; 17336 17337 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17338 return (EFAULT); 17339 17340 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17341 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17342 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17343 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17344 17345 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17346 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17347 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17348 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17349 17350 mutex_enter(&dtrace_lock); 17351 err = dtrace_enabling_replicate(state, match, create); 17352 mutex_exit(&dtrace_lock); 17353 17354 return (err); 17355 } 17356 17357 case DTRACEIOC_PROBEMATCH: 17358 case DTRACEIOC_PROBES: { 17359 dtrace_probe_t *probe = 
NULL; 17360 dtrace_probedesc_t desc; 17361 dtrace_probekey_t pkey; 17362 dtrace_id_t i; 17363 int m = 0; 17364 uint32_t priv; 17365 uid_t uid; 17366 zoneid_t zoneid; 17367 17368 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17369 return (EFAULT); 17370 17371 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 17372 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 17373 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 17374 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 17375 17376 /* 17377 * Before we attempt to match this probe, we want to give 17378 * all providers the opportunity to provide it. 17379 */ 17380 if (desc.dtpd_id == DTRACE_IDNONE) { 17381 mutex_enter(&dtrace_provider_lock); 17382 dtrace_probe_provide(&desc, NULL); 17383 mutex_exit(&dtrace_provider_lock); 17384 desc.dtpd_id++; 17385 } 17386 17387 if (cmd == DTRACEIOC_PROBEMATCH) { 17388 dtrace_probekey(&desc, &pkey); 17389 pkey.dtpk_id = DTRACE_IDNONE; 17390 } 17391 17392 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 17393 17394 mutex_enter(&dtrace_lock); 17395 17396 if (cmd == DTRACEIOC_PROBEMATCH) { 17397 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17398 if ((probe = dtrace_probes[i - 1]) != NULL && 17399 (m = dtrace_match_probe(probe, &pkey, 17400 priv, uid, zoneid)) != 0) 17401 break; 17402 } 17403 17404 if (m < 0) { 17405 mutex_exit(&dtrace_lock); 17406 return (EINVAL); 17407 } 17408 17409 } else { 17410 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 17411 if ((probe = dtrace_probes[i - 1]) != NULL && 17412 dtrace_match_priv(probe, priv, uid, zoneid)) 17413 break; 17414 } 17415 } 17416 17417 if (probe == NULL) { 17418 mutex_exit(&dtrace_lock); 17419 return (ESRCH); 17420 } 17421 17422 dtrace_probe_description(probe, &desc); 17423 mutex_exit(&dtrace_lock); 17424 17425 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17426 return (EFAULT); 17427 17428 return (0); 17429 } 17430 17431 case DTRACEIOC_PROBEARG: { 17432 dtrace_argdesc_t desc; 17433 dtrace_probe_t *probe; 17434 dtrace_provider_t *prov; 17435 17436 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17437 return (EFAULT); 17438 17439 if (desc.dtargd_id == DTRACE_IDNONE) 17440 return (EINVAL); 17441 17442 if (desc.dtargd_ndx == DTRACE_ARGNONE) 17443 return (EINVAL); 17444 17445 mutex_enter(&dtrace_provider_lock); 17446 mutex_enter(&mod_lock); 17447 mutex_enter(&dtrace_lock); 17448 17449 if (desc.dtargd_id > dtrace_nprobes) { 17450 mutex_exit(&dtrace_lock); 17451 mutex_exit(&mod_lock); 17452 mutex_exit(&dtrace_provider_lock); 17453 return (EINVAL); 17454 } 17455 17456 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 17457 mutex_exit(&dtrace_lock); 17458 mutex_exit(&mod_lock); 17459 mutex_exit(&dtrace_provider_lock); 17460 return (EINVAL); 17461 } 17462 17463 mutex_exit(&dtrace_lock); 17464 17465 prov = probe->dtpr_provider; 17466 17467 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 17468 /* 17469 * There isn't any typed information for this probe. 17470 * Set the argument number to DTRACE_ARGNONE. 
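 *
 * (From the consumer side, argument descriptions are walked by
 * incrementing dtargd_ndx until the driver answers DTRACE_ARGNONE;
 * a hedged sketch, assuming an open descriptor fd and a probe id id:
 *
 *	desc.dtargd_id = id;
 *	for (ndx = 0; ; ndx++) {
 *		desc.dtargd_ndx = ndx;
 *		if (ioctl(fd, DTRACEIOC_PROBEARG, &desc) != 0)
 *			break;
 *		if (desc.dtargd_ndx == DTRACE_ARGNONE)
 *			break;
 *		(use desc.dtargd_native and desc.dtargd_xlate here)
 *	}
 * )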
17471 */ 17472 desc.dtargd_ndx = DTRACE_ARGNONE; 17473 } else { 17474 desc.dtargd_native[0] = '\0'; 17475 desc.dtargd_xlate[0] = '\0'; 17476 desc.dtargd_mapping = desc.dtargd_ndx; 17477 17478 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 17479 probe->dtpr_id, probe->dtpr_arg, &desc); 17480 } 17481 17482 mutex_exit(&mod_lock); 17483 mutex_exit(&dtrace_provider_lock); 17484 17485 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17486 return (EFAULT); 17487 17488 return (0); 17489 } 17490 17491 case DTRACEIOC_GO: { 17492 processorid_t cpuid; 17493 rval = dtrace_state_go(state, &cpuid); 17494 17495 if (rval != 0) 17496 return (rval); 17497 17498 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17499 return (EFAULT); 17500 17501 return (0); 17502 } 17503 17504 case DTRACEIOC_STOP: { 17505 processorid_t cpuid; 17506 17507 mutex_enter(&dtrace_lock); 17508 rval = dtrace_state_stop(state, &cpuid); 17509 mutex_exit(&dtrace_lock); 17510 17511 if (rval != 0) 17512 return (rval); 17513 17514 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 17515 return (EFAULT); 17516 17517 return (0); 17518 } 17519 17520 case DTRACEIOC_DOFGET: { 17521 dof_hdr_t hdr, *dof; 17522 uint64_t len; 17523 17524 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 17525 return (EFAULT); 17526 17527 mutex_enter(&dtrace_lock); 17528 dof = dtrace_dof_create(state); 17529 mutex_exit(&dtrace_lock); 17530 17531 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 17532 rval = copyout(dof, (void *)arg, len); 17533 dtrace_dof_destroy(dof); 17534 17535 return (rval == 0 ? 0 : EFAULT); 17536 } 17537 17538 case DTRACEIOC_AGGSNAP: 17539 case DTRACEIOC_BUFSNAP: { 17540 dtrace_bufdesc_t desc; 17541 caddr_t cached; 17542 dtrace_buffer_t *buf; 17543 17544 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 17545 return (EFAULT); 17546 17547 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 17548 return (EINVAL); 17549 17550 mutex_enter(&dtrace_lock); 17551 17552 if (cmd == DTRACEIOC_BUFSNAP) { 17553 buf = &state->dts_buffer[desc.dtbd_cpu]; 17554 } else { 17555 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 17556 } 17557 17558 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 17559 size_t sz = buf->dtb_offset; 17560 17561 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 17562 mutex_exit(&dtrace_lock); 17563 return (EBUSY); 17564 } 17565 17566 /* 17567 * If this buffer has already been consumed, we're 17568 * going to indicate that there's nothing left here 17569 * to consume. 17570 */ 17571 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 17572 mutex_exit(&dtrace_lock); 17573 17574 desc.dtbd_size = 0; 17575 desc.dtbd_drops = 0; 17576 desc.dtbd_errors = 0; 17577 desc.dtbd_oldest = 0; 17578 sz = sizeof (desc); 17579 17580 if (copyout(&desc, (void *)arg, sz) != 0) 17581 return (EFAULT); 17582 17583 return (0); 17584 } 17585 17586 /* 17587 * If this is a ring buffer that has wrapped, we want 17588 * to copy the whole thing out. 
17589 */ 17590 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 17591 dtrace_buffer_polish(buf); 17592 sz = buf->dtb_size; 17593 } 17594 17595 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 17596 mutex_exit(&dtrace_lock); 17597 return (EFAULT); 17598 } 17599 17600 desc.dtbd_size = sz; 17601 desc.dtbd_drops = buf->dtb_drops; 17602 desc.dtbd_errors = buf->dtb_errors; 17603 desc.dtbd_oldest = buf->dtb_xamot_offset; 17604 desc.dtbd_timestamp = dtrace_gethrtime(); 17605 17606 mutex_exit(&dtrace_lock); 17607 17608 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17609 return (EFAULT); 17610 17611 buf->dtb_flags |= DTRACEBUF_CONSUMED; 17612 17613 return (0); 17614 } 17615 17616 if (buf->dtb_tomax == NULL) { 17617 ASSERT(buf->dtb_xamot == NULL); 17618 mutex_exit(&dtrace_lock); 17619 return (ENOENT); 17620 } 17621 17622 cached = buf->dtb_tomax; 17623 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 17624 17625 dtrace_xcall(desc.dtbd_cpu, 17626 (dtrace_xcall_t)dtrace_buffer_switch, buf); 17627 17628 state->dts_errors += buf->dtb_xamot_errors; 17629 17630 /* 17631 * If the buffers did not actually switch, then the cross call 17632 * did not take place -- presumably because the given CPU is 17633 * not in the ready set. If this is the case, we'll return 17634 * ENOENT. 17635 */ 17636 if (buf->dtb_tomax == cached) { 17637 ASSERT(buf->dtb_xamot != cached); 17638 mutex_exit(&dtrace_lock); 17639 return (ENOENT); 17640 } 17641 17642 ASSERT(cached == buf->dtb_xamot); 17643 17644 /* 17645 * We have our snapshot; now copy it out. 17646 */ 17647 if (copyout(buf->dtb_xamot, desc.dtbd_data, 17648 buf->dtb_xamot_offset) != 0) { 17649 mutex_exit(&dtrace_lock); 17650 return (EFAULT); 17651 } 17652 17653 desc.dtbd_size = buf->dtb_xamot_offset; 17654 desc.dtbd_drops = buf->dtb_xamot_drops; 17655 desc.dtbd_errors = buf->dtb_xamot_errors; 17656 desc.dtbd_oldest = 0; 17657 desc.dtbd_timestamp = buf->dtb_switched; 17658 17659 mutex_exit(&dtrace_lock); 17660 17661 /* 17662 * Finally, copy out the buffer description. 17663 */ 17664 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 17665 return (EFAULT); 17666 17667 return (0); 17668 } 17669 17670 case DTRACEIOC_CONF: { 17671 dtrace_conf_t conf; 17672 17673 bzero(&conf, sizeof (conf)); 17674 conf.dtc_difversion = DIF_VERSION; 17675 conf.dtc_difintregs = DIF_DIR_NREGS; 17676 conf.dtc_diftupregs = DIF_DTR_NREGS; 17677 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 17678 17679 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 17680 return (EFAULT); 17681 17682 return (0); 17683 } 17684 17685 case DTRACEIOC_STATUS: { 17686 dtrace_status_t stat; 17687 dtrace_dstate_t *dstate; 17688 int i, j; 17689 uint64_t nerrs; 17690 17691 /* 17692 * See the comment in dtrace_state_deadman() for the reason 17693 * for setting dts_laststatus to INT64_MAX before setting 17694 * it to the correct value. 
17695 */ 17696 state->dts_laststatus = INT64_MAX; 17697 dtrace_membar_producer(); 17698 state->dts_laststatus = dtrace_gethrtime(); 17699 17700 bzero(&stat, sizeof (stat)); 17701 17702 mutex_enter(&dtrace_lock); 17703 17704 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 17705 mutex_exit(&dtrace_lock); 17706 return (ENOENT); 17707 } 17708 17709 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 17710 stat.dtst_exiting = 1; 17711 17712 nerrs = state->dts_errors; 17713 dstate = &state->dts_vstate.dtvs_dynvars; 17714 17715 for (i = 0; i < NCPU; i++) { 17716 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 17717 17718 stat.dtst_dyndrops += dcpu->dtdsc_drops; 17719 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 17720 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 17721 17722 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 17723 stat.dtst_filled++; 17724 17725 nerrs += state->dts_buffer[i].dtb_errors; 17726 17727 for (j = 0; j < state->dts_nspeculations; j++) { 17728 dtrace_speculation_t *spec; 17729 dtrace_buffer_t *buf; 17730 17731 spec = &state->dts_speculations[j]; 17732 buf = &spec->dtsp_buffer[i]; 17733 stat.dtst_specdrops += buf->dtb_xamot_drops; 17734 } 17735 } 17736 17737 stat.dtst_specdrops_busy = state->dts_speculations_busy; 17738 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 17739 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 17740 stat.dtst_dblerrors = state->dts_dblerrors; 17741 stat.dtst_killed = 17742 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 17743 stat.dtst_errors = nerrs; 17744 17745 mutex_exit(&dtrace_lock); 17746 17747 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 17748 return (EFAULT); 17749 17750 return (0); 17751 } 17752 17753 case DTRACEIOC_FORMAT: { 17754 dtrace_fmtdesc_t fmt; 17755 char *str; 17756 int len; 17757 17758 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 17759 return (EFAULT); 17760 17761 mutex_enter(&dtrace_lock); 17762 17763 if (fmt.dtfd_format == 0 || 17764 fmt.dtfd_format > state->dts_nformats) { 17765 mutex_exit(&dtrace_lock); 17766 return (EINVAL); 17767 } 17768 17769 /* 17770 * Format strings are allocated contiguously and they are 17771 * never freed; if a format index is less than the number 17772 * of formats, we can assert that the format map is non-NULL 17773 * and that the format for the specified index is non-NULL. 
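 *
 * (Consumers typically size and fetch in two steps; a hedged sketch,
 * assuming an open descriptor fd and a valid format index id:
 *
 *	fmt.dtfd_format = id;
 *	fmt.dtfd_length = 0;
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(learns the length)
 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(copies the string)
 * )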
17774 */ 17775 ASSERT(state->dts_formats != NULL); 17776 str = state->dts_formats[fmt.dtfd_format - 1]; 17777 ASSERT(str != NULL); 17778 17779 len = strlen(str) + 1; 17780 17781 if (len > fmt.dtfd_length) { 17782 fmt.dtfd_length = len; 17783 17784 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 17785 mutex_exit(&dtrace_lock); 17786 return (EINVAL); 17787 } 17788 } else { 17789 if (copyout(str, fmt.dtfd_string, len) != 0) { 17790 mutex_exit(&dtrace_lock); 17791 return (EINVAL); 17792 } 17793 } 17794 17795 mutex_exit(&dtrace_lock); 17796 return (0); 17797 } 17798 17799 default: 17800 break; 17801 } 17802 17803 return (ENOTTY); 17804 } 17805 17806 /*ARGSUSED*/ 17807 static int 17808 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 17809 { 17810 dtrace_state_t *state; 17811 17812 switch (cmd) { 17813 case DDI_DETACH: 17814 break; 17815 17816 case DDI_SUSPEND: 17817 return (DDI_SUCCESS); 17818 17819 default: 17820 return (DDI_FAILURE); 17821 } 17822 17823 mutex_enter(&cpu_lock); 17824 mutex_enter(&dtrace_provider_lock); 17825 mutex_enter(&dtrace_lock); 17826 17827 ASSERT(dtrace_opens == 0); 17828 17829 if (dtrace_helpers > 0) { 17830 mutex_exit(&dtrace_provider_lock); 17831 mutex_exit(&dtrace_lock); 17832 mutex_exit(&cpu_lock); 17833 return (DDI_FAILURE); 17834 } 17835 17836 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 17837 mutex_exit(&dtrace_provider_lock); 17838 mutex_exit(&dtrace_lock); 17839 mutex_exit(&cpu_lock); 17840 return (DDI_FAILURE); 17841 } 17842 17843 dtrace_provider = NULL; 17844 17845 if ((state = dtrace_anon_grab()) != NULL) { 17846 /* 17847 * If there were ECBs on this state, the provider should 17848 * have not been allowed to detach; assert that there is 17849 * none. 17850 */ 17851 ASSERT(state->dts_necbs == 0); 17852 dtrace_state_destroy(state); 17853 17854 /* 17855 * If we're being detached with anonymous state, we need to 17856 * indicate to the kernel debugger that DTrace is now inactive. 
17857 */ 17858 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 17859 } 17860 17861 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 17862 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 17863 dtrace_cpu_init = NULL; 17864 dtrace_helpers_cleanup = NULL; 17865 dtrace_helpers_fork = NULL; 17866 dtrace_cpustart_init = NULL; 17867 dtrace_cpustart_fini = NULL; 17868 dtrace_debugger_init = NULL; 17869 dtrace_debugger_fini = NULL; 17870 dtrace_modload = NULL; 17871 dtrace_modunload = NULL; 17872 17873 ASSERT(dtrace_getf == 0); 17874 ASSERT(dtrace_closef == NULL); 17875 17876 mutex_exit(&cpu_lock); 17877 17878 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 17879 dtrace_probes = NULL; 17880 dtrace_nprobes = 0; 17881 17882 dtrace_hash_destroy(dtrace_bymod); 17883 dtrace_hash_destroy(dtrace_byfunc); 17884 dtrace_hash_destroy(dtrace_byname); 17885 dtrace_bymod = NULL; 17886 dtrace_byfunc = NULL; 17887 dtrace_byname = NULL; 17888 17889 kmem_cache_destroy(dtrace_state_cache); 17890 vmem_destroy(dtrace_minor); 17891 vmem_destroy(dtrace_arena); 17892 17893 if (dtrace_toxrange != NULL) { 17894 kmem_free(dtrace_toxrange, 17895 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 17896 dtrace_toxrange = NULL; 17897 dtrace_toxranges = 0; 17898 dtrace_toxranges_max = 0; 17899 } 17900 17901 ddi_remove_minor_node(dtrace_devi, NULL); 17902 dtrace_devi = NULL; 17903 17904 ddi_soft_state_fini(&dtrace_softstate); 17905 17906 ASSERT(dtrace_vtime_references == 0); 17907 ASSERT(dtrace_opens == 0); 17908 ASSERT(dtrace_retained == NULL); 17909 17910 mutex_exit(&dtrace_lock); 17911 mutex_exit(&dtrace_provider_lock); 17912 17913 /* 17914 * We don't destroy the task queue until after we have dropped our 17915 * locks (taskq_destroy() may block on running tasks). To prevent 17916 * attempting to do work after we have effectively detached but before 17917 * the task queue has been destroyed, all tasks dispatched via the 17918 * task queue must check that DTrace is still attached before 17919 * performing any operation. 
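 *
 * (The shape of that check is up to the task; one hedged sketch is to
 * test the attach state under dtrace_lock before doing anything:
 *
 *	mutex_enter(&dtrace_lock);
 *	if (dtrace_devi == NULL) {
 *		mutex_exit(&dtrace_lock);
 *		return;
 *	}
 * )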
17920 */ 17921 taskq_destroy(dtrace_taskq); 17922 dtrace_taskq = NULL; 17923 17924 return (DDI_SUCCESS); 17925 } 17926 #endif 17927 17928 #ifdef illumos 17929 /*ARGSUSED*/ 17930 static int 17931 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 17932 { 17933 int error; 17934 17935 switch (infocmd) { 17936 case DDI_INFO_DEVT2DEVINFO: 17937 *result = (void *)dtrace_devi; 17938 error = DDI_SUCCESS; 17939 break; 17940 case DDI_INFO_DEVT2INSTANCE: 17941 *result = (void *)0; 17942 error = DDI_SUCCESS; 17943 break; 17944 default: 17945 error = DDI_FAILURE; 17946 } 17947 return (error); 17948 } 17949 #endif 17950 17951 #ifdef illumos 17952 static struct cb_ops dtrace_cb_ops = { 17953 dtrace_open, /* open */ 17954 dtrace_close, /* close */ 17955 nulldev, /* strategy */ 17956 nulldev, /* print */ 17957 nodev, /* dump */ 17958 nodev, /* read */ 17959 nodev, /* write */ 17960 dtrace_ioctl, /* ioctl */ 17961 nodev, /* devmap */ 17962 nodev, /* mmap */ 17963 nodev, /* segmap */ 17964 nochpoll, /* poll */ 17965 ddi_prop_op, /* cb_prop_op */ 17966 0, /* streamtab */ 17967 D_NEW | D_MP /* Driver compatibility flag */ 17968 }; 17969 17970 static struct dev_ops dtrace_ops = { 17971 DEVO_REV, /* devo_rev */ 17972 0, /* refcnt */ 17973 dtrace_info, /* get_dev_info */ 17974 nulldev, /* identify */ 17975 nulldev, /* probe */ 17976 dtrace_attach, /* attach */ 17977 dtrace_detach, /* detach */ 17978 nodev, /* reset */ 17979 &dtrace_cb_ops, /* driver operations */ 17980 NULL, /* bus operations */ 17981 nodev /* dev power */ 17982 }; 17983 17984 static struct modldrv modldrv = { 17985 &mod_driverops, /* module type (this is a pseudo driver) */ 17986 "Dynamic Tracing", /* name of module */ 17987 &dtrace_ops, /* driver ops */ 17988 }; 17989 17990 static struct modlinkage modlinkage = { 17991 MODREV_1, 17992 (void *)&modldrv, 17993 NULL 17994 }; 17995 17996 int 17997 _init(void) 17998 { 17999 return (mod_install(&modlinkage)); 18000 } 18001 18002 int 18003 _info(struct modinfo *modinfop) 18004 { 18005 return (mod_info(&modlinkage, modinfop)); 18006 } 18007 18008 int 18009 _fini(void) 18010 { 18011 return (mod_remove(&modlinkage)); 18012 } 18013 #else 18014 18015 static d_ioctl_t dtrace_ioctl; 18016 static d_ioctl_t dtrace_ioctl_helper; 18017 static void dtrace_load(void *); 18018 static int dtrace_unload(void); 18019 static struct cdev *dtrace_dev; 18020 static struct cdev *helper_dev; 18021 18022 void dtrace_invop_init(void); 18023 void dtrace_invop_uninit(void); 18024 18025 static struct cdevsw dtrace_cdevsw = { 18026 .d_version = D_VERSION, 18027 .d_ioctl = dtrace_ioctl, 18028 .d_open = dtrace_open, 18029 .d_name = "dtrace", 18030 }; 18031 18032 static struct cdevsw helper_cdevsw = { 18033 .d_version = D_VERSION, 18034 .d_ioctl = dtrace_ioctl_helper, 18035 .d_name = "helper", 18036 }; 18037 18038 #include <dtrace_anon.c> 18039 #include <dtrace_ioctl.c> 18040 #include <dtrace_load.c> 18041 #include <dtrace_modevent.c> 18042 #include <dtrace_sysctl.c> 18043 #include <dtrace_unload.c> 18044 #include <dtrace_vtime.c> 18045 #include <dtrace_hacks.c> 18046 #include <dtrace_isa.c> 18047 18048 SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); 18049 SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); 18050 SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); 18051 18052 DEV_MODULE(dtrace, dtrace_modevent, NULL); 18053 MODULE_VERSION(dtrace, 1); 18054 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); 18055 #endif 18056
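
/*
 * An illustrative footnote (a sketch, not part of the driver): the
 * minimal consumer lifecycle against the ioctls above.  Error handling
 * and the construction of the DOF (normally libdtrace's job) are
 * elided; the device path and the sizing of buf are assumptions.
 *
 *	int fd = open("/dev/dtrace/dtrace", O_RDWR);
 *	processorid_t cpu;
 *	dtrace_bufdesc_t desc;
 *
 *	(void) ioctl(fd, DTRACEIOC_ENABLE, dof);	(slurp enablings)
 *	(void) ioctl(fd, DTRACEIOC_GO, &cpu);		(start tracing)
 *
 *	desc.dtbd_cpu = 0;
 *	desc.dtbd_data = buf;				(buffer-sized)
 *	(void) ioctl(fd, DTRACEIOC_BUFSNAP, &desc);	(snapshot CPU 0)
 *	(void) ioctl(fd, DTRACEIOC_STOP, &cpu);		(stop tracing)
 */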